/*
 * udp_diag.c	Module for monitoring UDP transport protocol sockets.
 *
 * Authors:	Pavel Emelyanov, <xemul@parallels.com>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */


#include <linux/module.h>
#include <linux/inet_diag.h>
#include <linux/udp.h>
#include <net/udp.h>
#include <net/udplite.h>
#include <linux/sock_diag.h>

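/*
 * Run the optional bytecode filter against @sk; if the socket matches,
 * append one inet_diag response record for it to the dump skb.
 */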
static int sk_diag_dump(struct sock *sk, struct sk_buff *skb,
			struct netlink_callback *cb,
			const struct inet_diag_req_v2 *req,
			struct nlattr *bc, bool net_admin)
{
	if (!inet_diag_bc_sk(bc, sk))
		return 0;

	return inet_sk_diag_fill(sk, NULL, skb, req,
			sk_user_ns(NETLINK_CB(cb->skb).sk),
			NETLINK_CB(cb->skb).portid,
			cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh, net_admin);
}

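/*
 * Handle an exact-socket request: look the socket up by the address/port
 * tuple in @req, verify the socket cookie, and unicast a single reply back
 * to the requesting netlink socket.
 */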
static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb,
			const struct nlmsghdr *nlh,
			const struct inet_diag_req_v2 *req)
{
	int err = -EINVAL;
	struct sock *sk = NULL;
	struct sk_buff *rep;
	struct net *net = sock_net(in_skb->sk);

	rcu_read_lock();
	if (req->sdiag_family == AF_INET)
		sk = __udp4_lib_lookup(net,
				req->id.idiag_src[0], req->id.idiag_sport,
				req->id.idiag_dst[0], req->id.idiag_dport,
				req->id.idiag_if, tbl, NULL);
#if IS_ENABLED(CONFIG_IPV6)
	else if (req->sdiag_family == AF_INET6)
		sk = __udp6_lib_lookup(net,
				(struct in6_addr *)req->id.idiag_src,
				req->id.idiag_sport,
				(struct in6_addr *)req->id.idiag_dst,
				req->id.idiag_dport,
				req->id.idiag_if, tbl, NULL);
#endif
	if (sk && !atomic_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;
	rcu_read_unlock();
	err = -ENOENT;
	if (!sk)
		goto out_nosk;

	err = sock_diag_check_cookie(sk, req->id.idiag_cookie);
	if (err)
		goto out;

	err = -ENOMEM;
	rep = nlmsg_new(sizeof(struct inet_diag_msg) +
			sizeof(struct inet_diag_meminfo) + 64,
			GFP_KERNEL);
	if (!rep)
		goto out;

	err = inet_sk_diag_fill(sk, NULL, rep, req,
			   sk_user_ns(NETLINK_CB(in_skb).sk),
			   NETLINK_CB(in_skb).portid,
			   nlh->nlmsg_seq, 0, nlh,
			   netlink_net_capable(in_skb, CAP_NET_ADMIN));
	if (err < 0) {
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(rep);
		goto out;
	}
	err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid,
			      MSG_DONTWAIT);
	if (err > 0)
		err = 0;
out:
	if (sk)
		sock_put(sk);
out_nosk:
	return err;
}

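/*
 * Walk every slot of the given UDP(-Lite) hash table and emit one record
 * for each socket that passes the request's state/family/port filters.
 * The slot lock is held while a slot is walked; cb->args[0]/[1] store the
 * current slot and in-slot index so the dump can resume where it left off.
 */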
static void udp_dump(struct udp_table *table, struct sk_buff *skb,
		     struct netlink_callback *cb,
		     const struct inet_diag_req_v2 *r, struct nlattr *bc)
{
	bool net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN);
	struct net *net = sock_net(skb->sk);
	int num, s_num, slot, s_slot;

	s_slot = cb->args[0];
	num = s_num = cb->args[1];

	for (slot = s_slot; slot <= table->mask; s_num = 0, slot++) {
		struct udp_hslot *hslot = &table->hash[slot];
		struct sock *sk;

		num = 0;

		if (hlist_empty(&hslot->head))
			continue;

		spin_lock_bh(&hslot->lock);
		sk_for_each(sk, &hslot->head) {
			struct inet_sock *inet = inet_sk(sk);

			if (!net_eq(sock_net(sk), net))
				continue;
			if (num < s_num)
				goto next;
			if (!(r->idiag_states & (1 << sk->sk_state)))
				goto next;
			if (r->sdiag_family != AF_UNSPEC &&
			    sk->sk_family != r->sdiag_family)
				goto next;
			if (r->id.idiag_sport != inet->inet_sport &&
			    r->id.idiag_sport)
				goto next;
			if (r->id.idiag_dport != inet->inet_dport &&
			    r->id.idiag_dport)
				goto next;

			if (sk_diag_dump(sk, skb, cb, r, bc, net_admin) < 0) {
				spin_unlock_bh(&hslot->lock);
				goto done;
			}
next:
			num++;
		}
		spin_unlock_bh(&hslot->lock);
	}
done:
	cb->args[0] = slot;
	cb->args[1] = num;
}

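/*
 * Thin wrappers binding the generic helpers above to the plain UDP socket
 * table; the UDP-Lite equivalents further down use udplite_table instead.
 */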
static void udp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
			  const struct inet_diag_req_v2 *r, struct nlattr *bc)
{
	udp_dump(&udp_table, skb, cb, r, bc);
}

static int udp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
			     const struct inet_diag_req_v2 *req)
{
	return udp_dump_one(&udp_table, in_skb, nlh, req);
}

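/* Report the receive and send queue sizes in the base inet_diag message. */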
static void udp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
		void *info)
{
	r->idiag_rqueue = sk_rmem_alloc_get(sk);
	r->idiag_wqueue = sk_wmem_alloc_get(sk);
}

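/*
 * Optional SOCK_DESTROY support: look up the socket described by @req and
 * forcibly abort it with ECONNABORTED.  Note that, unlike udp_dump_one(),
 * the request's dst/dport pair is passed as the first address arguments of
 * the lookup and src/sport as the second.
 */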
#ifdef CONFIG_INET_DIAG_DESTROY
static int __udp_diag_destroy(struct sk_buff *in_skb,
			      const struct inet_diag_req_v2 *req,
			      struct udp_table *tbl)
{
	struct net *net = sock_net(in_skb->sk);
	struct sock *sk;
	int err;

	rcu_read_lock();

	if (req->sdiag_family == AF_INET)
		sk = __udp4_lib_lookup(net,
				req->id.idiag_dst[0], req->id.idiag_dport,
				req->id.idiag_src[0], req->id.idiag_sport,
				req->id.idiag_if, tbl, NULL);
#if IS_ENABLED(CONFIG_IPV6)
	else if (req->sdiag_family == AF_INET6) {
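		/*
		 * A v4-mapped address pair refers to an IPv4 socket, so look
		 * it up in the IPv4 table using the embedded 32-bit addresses.
		 */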
		if (ipv6_addr_v4mapped((struct in6_addr *)req->id.idiag_dst) &&
		    ipv6_addr_v4mapped((struct in6_addr *)req->id.idiag_src))
			sk = __udp4_lib_lookup(net,
					req->id.idiag_dst[3], req->id.idiag_dport,
					req->id.idiag_src[3], req->id.idiag_sport,
					req->id.idiag_if, tbl, NULL);

		else
			sk = __udp6_lib_lookup(net,
					(struct in6_addr *)req->id.idiag_dst,
					req->id.idiag_dport,
					(struct in6_addr *)req->id.idiag_src,
					req->id.idiag_sport,
					req->id.idiag_if, tbl, NULL);
	}
#endif
	else {
		rcu_read_unlock();
		return -EINVAL;
	}

	if (sk && !atomic_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;

	rcu_read_unlock();

	if (!sk)
		return -ENOENT;

	if (sock_diag_check_cookie(sk, req->id.idiag_cookie)) {
		sock_put(sk);
		return -ENOENT;
	}

	err = sock_diag_destroy(sk, ECONNABORTED);

	sock_put(sk);

	return err;
}

static int udp_diag_destroy(struct sk_buff *in_skb,
			    const struct inet_diag_req_v2 *req)
{
	return __udp_diag_destroy(in_skb, req, &udp_table);
}

static int udplite_diag_destroy(struct sk_buff *in_skb,
				const struct inet_diag_req_v2 *req)
{
	return __udp_diag_destroy(in_skb, req, &udplite_table);
}

#endif

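/*
 * Handler descriptors registered with the inet_diag core, one for UDP and
 * one for UDP-Lite.
 */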
static const struct inet_diag_handler udp_diag_handler = {
	.dump		 = udp_diag_dump,
	.dump_one	 = udp_diag_dump_one,
	.idiag_get_info  = udp_diag_get_info,
	.idiag_type	 = IPPROTO_UDP,
	.idiag_info_size = 0,
#ifdef CONFIG_INET_DIAG_DESTROY
	.destroy	 = udp_diag_destroy,
#endif
};

static void udplite_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
			      const struct inet_diag_req_v2 *r,
			      struct nlattr *bc)
{
	udp_dump(&udplite_table, skb, cb, r, bc);
}

static int udplite_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
				 const struct inet_diag_req_v2 *req)
{
	return udp_dump_one(&udplite_table, in_skb, nlh, req);
}

static const struct inet_diag_handler udplite_diag_handler = {
	.dump		 = udplite_diag_dump,
	.dump_one	 = udplite_diag_dump_one,
	.idiag_get_info  = udp_diag_get_info,
	.idiag_type	 = IPPROTO_UDPLITE,
	.idiag_info_size = 0,
#ifdef CONFIG_INET_DIAG_DESTROY
	.destroy	 = udplite_diag_destroy,
#endif
};

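/* Register both handlers; unregister UDP again if UDP-Lite registration fails. */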
static int __init udp_diag_init(void)
{
	int err;

	err = inet_diag_register(&udp_diag_handler);
	if (err)
		goto out;
	err = inet_diag_register(&udplite_diag_handler);
	if (err)
		goto out_lite;
out:
	return err;
out_lite:
	inet_diag_unregister(&udp_diag_handler);
	goto out;
}

static void __exit udp_diag_exit(void)
{
	inet_diag_unregister(&udplite_diag_handler);
	inet_diag_unregister(&udp_diag_handler);
}

module_init(udp_diag_init);
module_exit(udp_diag_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-17 /* AF_INET - IPPROTO_UDP */);
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-136 /* AF_INET - IPPROTO_UDPLITE */);