blob: 8a9f6e535caa096522fb4e487efb4b3e6ef6483d [file] [log] [blame]
/*
 * udp_diag.c	Module for monitoring UDP transport protocols sockets.
 *
 * Authors:	Pavel Emelyanov, <xemul@parallels.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
11
12
13#include <linux/module.h>
14#include <linux/inet_diag.h>
15#include <linux/udp.h>
16#include <net/udp.h>
17#include <net/udplite.h>
Pavel Emelyanov52b7c592011-12-09 06:23:51 +000018#include <linux/sock_diag.h>
19
/*
 * Emit one socket into the netlink dump reply @skb, subject to the
 * optional userspace bytecode filter @bc.
 *
 * Returns 0 when the socket is filtered out by @bc, otherwise the
 * result of inet_sk_diag_fill() (a negative value signals the reply
 * skb is full and the dump must pause).
 */
static int sk_diag_dump(struct sock *sk, struct sk_buff *skb,
			struct netlink_callback *cb,
			const struct inet_diag_req_v2 *req,
			struct nlattr *bc)
{
	if (!inet_diag_bc_sk(bc, sk))
		return 0;

	return inet_sk_diag_fill(sk, NULL, skb, req,
				 sk_user_ns(NETLINK_CB(cb->skb).sk),
				 NETLINK_CB(cb->skb).portid,
				 cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
}
33
/*
 * Look up the single socket described by @req in @tbl (udp_table or
 * udplite_table) and unicast a full inet_diag reply back to the
 * requester.
 *
 * The lookup runs under RCU; the socket is only kept if its refcount
 * can be raised with atomic_inc_not_zero(), so a socket concurrently
 * being freed is treated as not found.
 *
 * Returns 0 on success, -ENOENT when no matching live socket exists
 * (including an unsupported address family, since @sk stays NULL) or
 * the cookie check fails, -ENOMEM when the reply skb cannot be
 * allocated, or the error from netlink_unicast().
 */
static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb,
			const struct nlmsghdr *nlh,
			const struct inet_diag_req_v2 *req)
{
	int err = -EINVAL;
	struct sock *sk = NULL;
	struct sk_buff *rep;
	struct net *net = sock_net(in_skb->sk);

	rcu_read_lock();
	if (req->sdiag_family == AF_INET)
		sk = __udp4_lib_lookup(net,
				req->id.idiag_src[0], req->id.idiag_sport,
				req->id.idiag_dst[0], req->id.idiag_dport,
				req->id.idiag_if, tbl, NULL);
#if IS_ENABLED(CONFIG_IPV6)
	else if (req->sdiag_family == AF_INET6)
		sk = __udp6_lib_lookup(net,
				(struct in6_addr *)req->id.idiag_src,
				req->id.idiag_sport,
				(struct in6_addr *)req->id.idiag_dst,
				req->id.idiag_dport,
				req->id.idiag_if, tbl, NULL);
#endif
	/* Drop sockets whose refcount already hit zero (being freed). */
	if (sk && !atomic_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;
	rcu_read_unlock();
	err = -ENOENT;
	if (!sk)
		goto out_nosk;

	err = sock_diag_check_cookie(sk, req->id.idiag_cookie);
	if (err)
		goto out;

	err = -ENOMEM;
	rep = nlmsg_new(sizeof(struct inet_diag_msg) +
			sizeof(struct inet_diag_meminfo) + 64,
			GFP_KERNEL);
	if (!rep)
		goto out;

	err = inet_sk_diag_fill(sk, NULL, rep, req,
			   sk_user_ns(NETLINK_CB(in_skb).sk),
			   NETLINK_CB(in_skb).portid,
			   nlh->nlmsg_seq, 0, nlh);
	if (err < 0) {
		/* rep was sized for one full message; -EMSGSIZE is a bug. */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(rep);
		goto out;
	}
	err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid,
			      MSG_DONTWAIT);
	if (err > 0)
		err = 0;
out:
	if (sk)
		sock_put(sk);
out_nosk:
	return err;
}
95
/*
 * Walk @table and dump every socket in this netns that matches the
 * filter @r, writing entries into @skb via sk_diag_dump().
 *
 * The dump is resumable: cb->args[0] holds the hash slot and
 * cb->args[1] the entry index within that slot where the previous
 * callback invocation stopped; both are stored back before returning
 * so the next invocation continues from the same position.
 */
static void udp_dump(struct udp_table *table, struct sk_buff *skb,
		     struct netlink_callback *cb,
		     const struct inet_diag_req_v2 *r, struct nlattr *bc)
{
	struct net *net = sock_net(skb->sk);
	int num, s_num, slot, s_slot;

	s_slot = cb->args[0];
	num = s_num = cb->args[1];

	/* s_num only applies to the slot we resumed in; reset after it. */
	for (slot = s_slot; slot <= table->mask; s_num = 0, slot++) {
		struct udp_hslot *hslot = &table->hash[slot];
		struct sock *sk;

		num = 0;

		if (hlist_empty(&hslot->head))
			continue;

		/* Each chain is traversed under its per-slot lock. */
		spin_lock_bh(&hslot->lock);
		sk_for_each(sk, &hslot->head) {
			struct inet_sock *inet = inet_sk(sk);

			if (!net_eq(sock_net(sk), net))
				continue;
			/* Skip entries already emitted by a prior call. */
			if (num < s_num)
				goto next;
			if (!(r->idiag_states & (1 << sk->sk_state)))
				goto next;
			if (r->sdiag_family != AF_UNSPEC &&
			    sk->sk_family != r->sdiag_family)
				goto next;
			if (r->id.idiag_sport != inet->inet_sport &&
			    r->id.idiag_sport)
				goto next;
			if (r->id.idiag_dport != inet->inet_dport &&
			    r->id.idiag_dport)
				goto next;

			/* Reply skb full: record position and stop here. */
			if (sk_diag_dump(sk, skb, cb, r, bc) < 0) {
				spin_unlock_bh(&hslot->lock);
				goto done;
			}
next:
			num++;
		}
		spin_unlock_bh(&hslot->lock);
	}
done:
	cb->args[0] = slot;
	cb->args[1] = num;
}
148
/* inet_diag .dump callback for plain UDP: walk the global udp_table. */
static void udp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
			  const struct inet_diag_req_v2 *r, struct nlattr *bc)
{
	udp_dump(&udp_table, skb, cb, r, bc);
}
154
/* inet_diag .dump_one callback for plain UDP: single-socket query. */
static int udp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
			     const struct inet_diag_req_v2 *req)
{
	return udp_dump_one(&udp_table, in_skb, nlh, req);
}
160
/*
 * Fill the receive/send queue sizes reported in the inet_diag
 * message; @info is unused for UDP (idiag_info_size is 0).
 */
static void udp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
		void *info)
{
	r->idiag_rqueue = sk_rmem_alloc_get(sk);
	r->idiag_wqueue = sk_wmem_alloc_get(sk);
}
167
#ifdef CONFIG_INET_DIAG_DESTROY
/*
 * Locate the socket described by @req in @tbl and forcibly close it
 * (SOCK_DESTROY), aborting it with ECONNABORTED.
 *
 * Note the lookup passes idiag_dst/idiag_dport before
 * idiag_src/idiag_sport — the opposite order to udp_dump_one() —
 * presumably to match __udpX_lib_lookup()'s (remote, local) argument
 * convention; NOTE(review): confirm against the lookup helpers.
 *
 * AF_INET6 requests whose addresses are both v4-mapped are redirected
 * to the IPv4 lookup so dual-stack sockets can be destroyed.
 *
 * Returns 0 on success, -EINVAL for an unsupported address family,
 * -ENOENT when no matching live socket is found or the cookie does
 * not match, otherwise the error from sock_diag_destroy().
 */
static int __udp_diag_destroy(struct sk_buff *in_skb,
			      const struct inet_diag_req_v2 *req,
			      struct udp_table *tbl)
{
	struct net *net = sock_net(in_skb->sk);
	struct sock *sk;
	int err;

	rcu_read_lock();

	if (req->sdiag_family == AF_INET)
		sk = __udp4_lib_lookup(net,
				req->id.idiag_dst[0], req->id.idiag_dport,
				req->id.idiag_src[0], req->id.idiag_sport,
				req->id.idiag_if, tbl, NULL);
#if IS_ENABLED(CONFIG_IPV6)
	else if (req->sdiag_family == AF_INET6) {
		if (ipv6_addr_v4mapped((struct in6_addr *)req->id.idiag_dst) &&
		    ipv6_addr_v4mapped((struct in6_addr *)req->id.idiag_src))
			sk = __udp4_lib_lookup(net,
					req->id.idiag_dst[0], req->id.idiag_dport,
					req->id.idiag_src[0], req->id.idiag_sport,
					req->id.idiag_if, tbl, NULL);

		else
			sk = __udp6_lib_lookup(net,
					(struct in6_addr *)req->id.idiag_dst,
					req->id.idiag_dport,
					(struct in6_addr *)req->id.idiag_src,
					req->id.idiag_sport,
					req->id.idiag_if, tbl, NULL);
	}
#endif
	else {
		rcu_read_unlock();
		return -EINVAL;
	}

	/* Keep the socket only if its refcount is still nonzero. */
	if (sk && !atomic_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;

	rcu_read_unlock();

	if (!sk)
		return -ENOENT;

	if (sock_diag_check_cookie(sk, req->id.idiag_cookie)) {
		sock_put(sk);
		return -ENOENT;
	}

	err = sock_diag_destroy(sk, ECONNABORTED);

	sock_put(sk);

	return err;
}

/* SOCK_DESTROY handler for plain UDP sockets. */
static int udp_diag_destroy(struct sk_buff *in_skb,
			    const struct inet_diag_req_v2 *req)
{
	return __udp_diag_destroy(in_skb, req, &udp_table);
}

/* SOCK_DESTROY handler for UDP-Lite sockets. */
static int udplite_diag_destroy(struct sk_buff *in_skb,
				const struct inet_diag_req_v2 *req)
{
	return __udp_diag_destroy(in_skb, req, &udplite_table);
}

#endif
240
/* inet_diag handler registered for IPPROTO_UDP sockets. */
static const struct inet_diag_handler udp_diag_handler = {
	.dump		 = udp_diag_dump,
	.dump_one	 = udp_diag_dump_one,
	.idiag_get_info  = udp_diag_get_info,
	.idiag_type	 = IPPROTO_UDP,
	.idiag_info_size = 0,	/* UDP carries no protocol-specific info */
#ifdef CONFIG_INET_DIAG_DESTROY
	.destroy	 = udp_diag_destroy,
#endif
};
251
/* inet_diag .dump callback for UDP-Lite: walk the udplite_table. */
static void udplite_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
			      const struct inet_diag_req_v2 *r,
			      struct nlattr *bc)
{
	udp_dump(&udplite_table, skb, cb, r, bc);
}
258
/* inet_diag .dump_one callback for UDP-Lite: single-socket query. */
static int udplite_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
				 const struct inet_diag_req_v2 *req)
{
	return udp_dump_one(&udplite_table, in_skb, nlh, req);
}
264
/* inet_diag handler registered for IPPROTO_UDPLITE sockets. */
static const struct inet_diag_handler udplite_diag_handler = {
	.dump		 = udplite_diag_dump,
	.dump_one	 = udplite_diag_dump_one,
	.idiag_get_info  = udp_diag_get_info,
	.idiag_type	 = IPPROTO_UDPLITE,
	.idiag_info_size = 0,	/* no protocol-specific info block */
#ifdef CONFIG_INET_DIAG_DESTROY
	.destroy	 = udplite_diag_destroy,
#endif
};
275
276static int __init udp_diag_init(void)
277{
278 int err;
279
280 err = inet_diag_register(&udp_diag_handler);
281 if (err)
282 goto out;
283 err = inet_diag_register(&udplite_diag_handler);
284 if (err)
285 goto out_lite;
286out:
287 return err;
288out_lite:
289 inet_diag_unregister(&udp_diag_handler);
290 goto out;
291}
292
/* Unregister both handlers in reverse order of registration. */
static void __exit udp_diag_exit(void)
{
	inet_diag_unregister(&udplite_diag_handler);
	inet_diag_unregister(&udp_diag_handler);
}
298
299module_init(udp_diag_init);
300module_exit(udp_diag_exit);
301MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-17 /* AF_INET - IPPROTO_UDP */);
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-136 /* AF_INET - IPPROTO_UDPLITE */);