blob: 24c7a65d9cb186528af23d66639110fa0f642a4f [file] [log] [blame]
Pavel Emelyanov22931d32011-12-15 02:44:35 +00001#include <linux/types.h>
2#include <linux/spinlock.h>
3#include <linux/sock_diag.h>
4#include <linux/unix_diag.h>
5#include <linux/skbuff.h>
6#include <net/netlink.h>
7#include <net/af_unix.h>
8#include <net/tcp_states.h>
9
/*
 * Reserve an rtattr of @attrlen payload bytes in @skb and return a
 * pointer to the payload.  NOTE: on overflow __RTA_PUT expands to
 * "goto rtattr_failure" in the *caller*, so every user of this macro
 * must define an rtattr_failure label.
 */
#define UNIX_DIAG_PUT(skb, attrtype, attrlen) \
	RTA_DATA(__RTA_PUT(skb, attrtype, attrlen))
12
/*
 * Emit the socket's bound name as a UNIX_DIAG_NAME attribute.
 *
 * addr->len counts the leading address family (a short) as well, so the
 * payload copied is addr->len - sizeof(short) bytes of sun_path.  An
 * unbound socket (addr == NULL) contributes no attribute and is still
 * treated as success.
 *
 * Returns 0 on success, -EMSGSIZE if the skb has no room (reached via
 * the rtattr_failure goto hidden inside UNIX_DIAG_PUT).
 */
static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb)
{
	struct unix_address *addr = unix_sk(sk)->addr;
	char *s;

	if (addr) {
		s = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_NAME, addr->len - sizeof(short));
		memcpy(s, addr->name->sun_path, addr->len - sizeof(short));
	}

	return 0;

rtattr_failure:
	return -EMSGSIZE;
}
28
/*
 * Emit a UNIX_DIAG_VFS attribute (inode number + device) for a socket
 * bound to a filesystem path.  Sockets with no dentry (abstract or
 * unbound) contribute nothing and still return success.
 *
 * Returns 0 on success, -EMSGSIZE on skb overflow (via the
 * rtattr_failure goto hidden inside UNIX_DIAG_PUT).
 */
static int sk_diag_dump_vfs(struct sock *sk, struct sk_buff *nlskb)
{
	struct dentry *dentry = unix_sk(sk)->dentry;
	struct unix_diag_vfs *uv;

	if (dentry) {
		uv = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_VFS, sizeof(*uv));
		uv->udiag_vfs_ino = dentry->d_inode->i_ino;
		uv->udiag_vfs_dev = dentry->d_sb->s_dev;
	}

	return 0;

rtattr_failure:
	return -EMSGSIZE;
}
45
/*
 * Emit a UNIX_DIAG_PEER attribute carrying the peer socket's inode
 * number.  unix_peer_get() takes a reference, so the peer cannot go
 * away while we read its inode; the reference is dropped before the
 * attribute is written.  A socket with no peer emits nothing.
 *
 * Returns 0 on success, -EMSGSIZE on skb overflow (via the
 * rtattr_failure goto hidden inside RTA_PUT_U32).
 */
static int sk_diag_dump_peer(struct sock *sk, struct sk_buff *nlskb)
{
	struct sock *peer;
	int ino;

	peer = unix_peer_get(sk);
	if (peer) {
		unix_state_lock(peer);
		ino = sock_i_ino(peer);
		unix_state_unlock(peer);
		sock_put(peer);

		RTA_PUT_U32(nlskb, UNIX_DIAG_PEER, ino);
	}

	return 0;
rtattr_failure:
	return -EMSGSIZE;
}
65
Pavel Emelyanov2aac7a22011-12-15 02:46:14 +000066static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb)
67{
68 struct sk_buff *skb;
69 u32 *buf;
70 int i;
71
72 if (sk->sk_state == TCP_LISTEN) {
73 spin_lock(&sk->sk_receive_queue.lock);
74 buf = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_ICONS, sk->sk_receive_queue.qlen);
75 i = 0;
76 skb_queue_walk(&sk->sk_receive_queue, skb) {
77 struct sock *req, *peer;
78
79 req = skb->sk;
80 /*
81 * The state lock is outer for the same sk's
82 * queue lock. With the other's queue locked it's
83 * OK to lock the state.
84 */
85 unix_state_lock_nested(req);
86 peer = unix_sk(req)->peer;
87 if (peer)
88 buf[i++] = sock_i_ino(peer);
89 unix_state_unlock(req);
90 }
91 spin_unlock(&sk->sk_receive_queue.lock);
92 }
93
94 return 0;
95
96rtattr_failure:
97 spin_unlock(&sk->sk_receive_queue.lock);
98 return -EMSGSIZE;
99}
100
/*
 * Build one complete netlink reply message describing @sk: the fixed
 * unix_diag_msg header plus whichever optional attributes the request's
 * udiag_show bitmask asks for.
 *
 * @sk_ino is passed in (rather than read here) because the caller
 * sampled it under the socket's state lock.
 *
 * Returns skb->len on success.  On any overflow, the partially written
 * message is trimmed back to @b (the tail recorded on entry) and
 * -EMSGSIZE is returned, so the skb is left consistent.  NLMSG_PUT
 * jumps to nlmsg_failure itself when even the header does not fit.
 */
static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
		u32 pid, u32 seq, u32 flags, int sk_ino)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct nlmsghdr *nlh;
	struct unix_diag_msg *rep;

	nlh = NLMSG_PUT(skb, pid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rep));
	nlh->nlmsg_flags = flags;

	rep = NLMSG_DATA(nlh);

	rep->udiag_family = AF_UNIX;
	rep->udiag_type = sk->sk_type;
	rep->udiag_state = sk->sk_state;
	rep->udiag_ino = sk_ino;
	sock_diag_save_cookie(sk, rep->udiag_cookie);

	/* Each optional attribute is emitted only when requested; any
	 * helper returning non-zero means the skb is full. */
	if ((req->udiag_show & UDIAG_SHOW_NAME) &&
	    sk_diag_dump_name(sk, skb))
		goto nlmsg_failure;

	if ((req->udiag_show & UDIAG_SHOW_VFS) &&
	    sk_diag_dump_vfs(sk, skb))
		goto nlmsg_failure;

	if ((req->udiag_show & UDIAG_SHOW_PEER) &&
	    sk_diag_dump_peer(sk, skb))
		goto nlmsg_failure;

	if ((req->udiag_show & UDIAG_SHOW_ICONS) &&
	    sk_diag_dump_icons(sk, skb))
		goto nlmsg_failure;

	/* Patch the final message length now that all attributes are in. */
	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

nlmsg_failure:
	nlmsg_trim(skb, b);
	return -EMSGSIZE;
}
142
143static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
144 u32 pid, u32 seq, u32 flags)
145{
146 int sk_ino;
147
148 unix_state_lock(sk);
149 sk_ino = sock_i_ino(sk);
150 unix_state_unlock(sk);
151
152 if (!sk_ino)
153 return 0;
154
155 return sk_diag_fill(sk, skb, req, pid, seq, flags, sk_ino);
156}
157
/*
 * Netlink dump callback: walk the global unix socket hash and emit one
 * message per socket whose state matches the request's udiag_states
 * bitmask.
 *
 * Resume state lives in cb->args: args[0] is the hash slot, args[1] the
 * ordinal position within that slot; both are restored on entry and
 * saved on exit so a partially filled skb can be continued on the next
 * call.  Note the "<= UNIX_HASH_SIZE" bound: the table has one extra
 * slot past the hash range (holding sockets without a name), so
 * iterating through slot UNIX_HASH_SIZE inclusive is intentional.
 */
static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct unix_diag_req *req;
	int num, s_num, slot, s_slot;

	req = NLMSG_DATA(cb->nlh);

	s_slot = cb->args[0];
	num = s_num = cb->args[1];

	spin_lock(&unix_table_lock);
	for (slot = s_slot; slot <= UNIX_HASH_SIZE; s_num = 0, slot++) {
		struct sock *sk;
		struct hlist_node *node;

		num = 0;
		sk_for_each(sk, node, &unix_socket_table[slot]) {
			/* Skip entries already emitted in a previous call. */
			if (num < s_num)
				goto next;
			if (!(req->udiag_states & (1 << sk->sk_state)))
				goto next;
			/* sk_diag_dump < 0 means the skb is full: stop and
			 * record where to resume. */
			if (sk_diag_dump(sk, skb, req,
					 NETLINK_CB(cb->skb).pid,
					 cb->nlh->nlmsg_seq,
					 NLM_F_MULTI) < 0)
				goto done;
next:
			num++;
		}
	}
done:
	spin_unlock(&unix_table_lock);
	cb->args[0] = slot;
	cb->args[1] = num;

	return skb->len;
}
195
Pavel Emelyanov5d3cae82011-12-15 02:45:07 +0000196static struct sock *unix_lookup_by_ino(int ino)
197{
198 int i;
199 struct sock *sk;
200
201 spin_lock(&unix_table_lock);
202 for (i = 0; i <= UNIX_HASH_SIZE; i++) {
203 struct hlist_node *node;
204
205 sk_for_each(sk, node, &unix_socket_table[i])
206 if (ino == sock_i_ino(sk)) {
207 sock_hold(sk);
208 spin_unlock(&unix_table_lock);
209
210 return sk;
211 }
212 }
213
214 spin_unlock(&unix_table_lock);
215 return NULL;
216}
217
/*
 * Handle a non-dump request for one specific socket, identified by
 * inode number (and optionally pinned down by the cookie check).
 *
 * The reply skb is allocated with a guessed extra_len for attributes;
 * if sk_diag_fill() overflows, the skb is freed and the allocation is
 * retried 256 bytes larger, up to PAGE_SIZE, before giving up with
 * -ENOMEM.
 *
 * Returns 0 on success or a negative errno:
 *   -EINVAL  inode 0 requested,
 *   -ENOENT  no socket with that inode,
 *   cookie-mismatch errno from sock_diag_check_cookie,
 *   -ENOMEM  allocation failed or reply would not fit in a page.
 */
static int unix_diag_get_exact(struct sk_buff *in_skb,
			       const struct nlmsghdr *nlh,
			       struct unix_diag_req *req)
{
	int err = -EINVAL;
	struct sock *sk;
	struct sk_buff *rep;
	unsigned int extra_len;

	if (req->udiag_ino == 0)
		goto out_nosk;

	/* On success sk carries a reference; dropped at out:. */
	sk = unix_lookup_by_ino(req->udiag_ino);
	err = -ENOENT;
	if (sk == NULL)
		goto out_nosk;

	err = sock_diag_check_cookie(sk, req->udiag_cookie);
	if (err)
		goto out;

	extra_len = 256;
again:
	err = -ENOMEM;
	rep = alloc_skb(NLMSG_SPACE((sizeof(struct unix_diag_msg) + extra_len)),
			GFP_KERNEL);
	if (!rep)
		goto out;

	err = sk_diag_fill(sk, rep, req, NETLINK_CB(in_skb).pid,
			   nlh->nlmsg_seq, 0, req->udiag_ino);
	if (err < 0) {
		/* Reply did not fit: grow the buffer and retry. */
		kfree_skb(rep);
		extra_len += 256;
		if (extra_len >= PAGE_SIZE)
			goto out;

		goto again;
	}
	/* netlink_unicast consumes rep on both success and failure. */
	err = netlink_unicast(sock_diag_nlsk, rep, NETLINK_CB(in_skb).pid,
			      MSG_DONTWAIT);
	if (err > 0)
		err = 0;
out:
	if (sk)
		sock_put(sk);
out_nosk:
	return err;
}
267
268static int unix_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
269{
270 int hdrlen = sizeof(struct unix_diag_req);
271
272 if (nlmsg_len(h) < hdrlen)
273 return -EINVAL;
274
275 if (h->nlmsg_flags & NLM_F_DUMP)
276 return netlink_dump_start(sock_diag_nlsk, skb, h,
277 unix_diag_dump, NULL, 0);
278 else
279 return unix_diag_get_exact(skb, h, (struct unix_diag_req *)NLMSG_DATA(h));
280}
281
/* sock_diag dispatch entry: requests with udiag_family == AF_UNIX land
 * in unix_diag_handler_dump(). */
static struct sock_diag_handler unix_diag_handler = {
	.family = AF_UNIX,
	.dump = unix_diag_handler_dump,
};
286
/* Module init: register the AF_UNIX handler with the sock_diag core. */
static int __init unix_diag_init(void)
{
	return sock_diag_register(&unix_diag_handler);
}
291
/* Module exit: unregister the AF_UNIX handler. */
static void __exit unix_diag_exit(void)
{
	sock_diag_unregister(&unix_diag_handler);
}
296
module_init(unix_diag_init);
module_exit(unix_diag_exit);
MODULE_LICENSE("GPL");
/* Autoload this module when userspace sends a NETLINK_SOCK_DIAG request
 * for family 1 (AF_LOCAL / AF_UNIX). */
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 1 /* AF_LOCAL */);