/*
 * udp_diag.c	Module for monitoring UDP transport protocol sockets.
 *
 * Authors:	Pavel Emelyanov, <xemul@parallels.com>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */


#include <linux/module.h>
#include <linux/inet_diag.h>
#include <linux/udp.h>
#include <net/udp.h>
#include <net/udplite.h>
#include <linux/sock_diag.h>

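/* Dump one socket into the reply skb, subject to the optional bytecode
 * filter in @bc; returns 0 when the socket is filtered out, otherwise
 * the result of inet_sk_diag_fill().
 */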
static int sk_diag_dump(struct sock *sk, struct sk_buff *skb,
			struct netlink_callback *cb,
			const struct inet_diag_req_v2 *req,
			struct nlattr *bc, bool net_admin)
{
	if (!inet_diag_bc_sk(bc, sk))
		return 0;

	return inet_sk_diag_fill(sk, NULL, skb, req,
			sk_user_ns(NETLINK_CB(cb->skb).sk),
			NETLINK_CB(cb->skb).portid,
			cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh, net_admin);
}

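/* Look up the single socket matching the address/port/cookie key in @req,
 * fill a diag reply and unicast it back to the requesting netlink socket.
 */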
static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb,
			const struct nlmsghdr *nlh,
			const struct inet_diag_req_v2 *req)
{
	int err = -EINVAL;
	struct sock *sk = NULL;
	struct sk_buff *rep;
	struct net *net = sock_net(in_skb->sk);

	rcu_read_lock();
	if (req->sdiag_family == AF_INET)
		sk = __udp4_lib_lookup(net,
				req->id.idiag_src[0], req->id.idiag_sport,
				req->id.idiag_dst[0], req->id.idiag_dport,
				req->id.idiag_if, tbl, NULL);
#if IS_ENABLED(CONFIG_IPV6)
	else if (req->sdiag_family == AF_INET6)
		sk = __udp6_lib_lookup(net,
				(struct in6_addr *)req->id.idiag_src,
				req->id.idiag_sport,
				(struct in6_addr *)req->id.idiag_dst,
				req->id.idiag_dport,
				req->id.idiag_if, tbl, NULL);
#endif
	if (sk && !atomic_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;
	rcu_read_unlock();
	err = -ENOENT;
	if (!sk)
		goto out_nosk;

	err = sock_diag_check_cookie(sk, req->id.idiag_cookie);
	if (err)
		goto out;

	err = -ENOMEM;
	rep = nlmsg_new(sizeof(struct inet_diag_msg) +
			sizeof(struct inet_diag_meminfo) + 64,
			GFP_KERNEL);
	if (!rep)
		goto out;

	err = inet_sk_diag_fill(sk, NULL, rep, req,
			   sk_user_ns(NETLINK_CB(in_skb).sk),
			   NETLINK_CB(in_skb).portid,
			   nlh->nlmsg_seq, 0, nlh,
			   netlink_net_capable(in_skb, CAP_NET_ADMIN));
	if (err < 0) {
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(rep);
		goto out;
	}
	err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid,
			      MSG_DONTWAIT);
	if (err > 0)
		err = 0;
out:
	if (sk)
		sock_put(sk);
out_nosk:
	return err;
}

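/* Walk the whole UDP hash table and dump every socket that passes the
 * state, family and port filters in @r.  cb->args[0]/[1] hold the slot
 * and in-slot index so the dump can resume across netlink callbacks.
 */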
static void udp_dump(struct udp_table *table, struct sk_buff *skb,
		     struct netlink_callback *cb,
		     const struct inet_diag_req_v2 *r, struct nlattr *bc)
{
	bool net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN);
	struct net *net = sock_net(skb->sk);
	int num, s_num, slot, s_slot;

	s_slot = cb->args[0];
	num = s_num = cb->args[1];

	for (slot = s_slot; slot <= table->mask; s_num = 0, slot++) {
		struct udp_hslot *hslot = &table->hash[slot];
		struct sock *sk;

		num = 0;

		if (hlist_empty(&hslot->head))
			continue;

		spin_lock_bh(&hslot->lock);
		sk_for_each(sk, &hslot->head) {
			struct inet_sock *inet = inet_sk(sk);

			if (!net_eq(sock_net(sk), net))
				continue;
			if (num < s_num)
				goto next;
			if (!(r->idiag_states & (1 << sk->sk_state)))
				goto next;
			if (r->sdiag_family != AF_UNSPEC &&
			    sk->sk_family != r->sdiag_family)
				goto next;
			if (r->id.idiag_sport != inet->inet_sport &&
			    r->id.idiag_sport)
				goto next;
			if (r->id.idiag_dport != inet->inet_dport &&
			    r->id.idiag_dport)
				goto next;

			if (sk_diag_dump(sk, skb, cb, r, bc, net_admin) < 0) {
				spin_unlock_bh(&hslot->lock);
				goto done;
			}
next:
			num++;
		}
		spin_unlock_bh(&hslot->lock);
	}
done:
	cb->args[0] = slot;
	cb->args[1] = num;
}

static void udp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
			  const struct inet_diag_req_v2 *r, struct nlattr *bc)
{
	udp_dump(&udp_table, skb, cb, r, bc);
}

static int udp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
			     const struct inet_diag_req_v2 *req)
{
	return udp_dump_one(&udp_table, in_skb, nlh, req);
}

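/* UDP keeps no protocol-specific diag info; only report queue sizes. */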
static void udp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
			      void *info)
{
	r->idiag_rqueue = sk_rmem_alloc_get(sk);
	r->idiag_wqueue = sk_wmem_alloc_get(sk);
}

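/* SOCK_DESTROY support: look up the socket described by @req (with
 * IPv4-mapped IPv6 addresses redirected to the IPv4 lookup path) and
 * abort it with ECONNABORTED.
 */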
#ifdef CONFIG_INET_DIAG_DESTROY
static int __udp_diag_destroy(struct sk_buff *in_skb,
			      const struct inet_diag_req_v2 *req,
			      struct udp_table *tbl)
{
	struct net *net = sock_net(in_skb->sk);
	struct sock *sk;
	int err;

	rcu_read_lock();

	if (req->sdiag_family == AF_INET)
		sk = __udp4_lib_lookup(net,
				req->id.idiag_dst[0], req->id.idiag_dport,
				req->id.idiag_src[0], req->id.idiag_sport,
				req->id.idiag_if, tbl, NULL);
#if IS_ENABLED(CONFIG_IPV6)
	else if (req->sdiag_family == AF_INET6) {
		if (ipv6_addr_v4mapped((struct in6_addr *)req->id.idiag_dst) &&
		    ipv6_addr_v4mapped((struct in6_addr *)req->id.idiag_src))
			sk = __udp4_lib_lookup(net,
					req->id.idiag_dst[3], req->id.idiag_dport,
					req->id.idiag_src[3], req->id.idiag_sport,
					req->id.idiag_if, tbl, NULL);

		else
			sk = __udp6_lib_lookup(net,
					(struct in6_addr *)req->id.idiag_dst,
					req->id.idiag_dport,
					(struct in6_addr *)req->id.idiag_src,
					req->id.idiag_sport,
					req->id.idiag_if, tbl, NULL);
	}
#endif
	else {
		rcu_read_unlock();
		return -EINVAL;
	}

	if (sk && !atomic_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;

	rcu_read_unlock();

	if (!sk)
		return -ENOENT;

	if (sock_diag_check_cookie(sk, req->id.idiag_cookie)) {
		sock_put(sk);
		return -ENOENT;
	}

	err = sock_diag_destroy(sk, ECONNABORTED);

	sock_put(sk);

	return err;
}

static int udp_diag_destroy(struct sk_buff *in_skb,
			    const struct inet_diag_req_v2 *req)
{
	return __udp_diag_destroy(in_skb, req, &udp_table);
}

static int udplite_diag_destroy(struct sk_buff *in_skb,
				const struct inet_diag_req_v2 *req)
{
	return __udp_diag_destroy(in_skb, req, &udplite_table);
}

#endif

static const struct inet_diag_handler udp_diag_handler = {
	.dump		 = udp_diag_dump,
	.dump_one	 = udp_diag_dump_one,
	.idiag_get_info  = udp_diag_get_info,
	.idiag_type	 = IPPROTO_UDP,
	.idiag_info_size = 0,
#ifdef CONFIG_INET_DIAG_DESTROY
	.destroy	 = udp_diag_destroy,
#endif
};

static void udplite_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
			      const struct inet_diag_req_v2 *r,
			      struct nlattr *bc)
{
	udp_dump(&udplite_table, skb, cb, r, bc);
}

static int udplite_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
				 const struct inet_diag_req_v2 *req)
{
	return udp_dump_one(&udplite_table, in_skb, nlh, req);
}

static const struct inet_diag_handler udplite_diag_handler = {
	.dump		 = udplite_diag_dump,
	.dump_one	 = udplite_diag_dump_one,
	.idiag_get_info  = udp_diag_get_info,
	.idiag_type	 = IPPROTO_UDPLITE,
	.idiag_info_size = 0,
#ifdef CONFIG_INET_DIAG_DESTROY
	.destroy	 = udplite_diag_destroy,
#endif
};

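/* Register diag handlers for both UDP and UDP-Lite with inet_diag. */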
static int __init udp_diag_init(void)
{
	int err;

	err = inet_diag_register(&udp_diag_handler);
	if (err)
		goto out;
	err = inet_diag_register(&udplite_diag_handler);
	if (err)
		goto out_lite;
out:
	return err;
out_lite:
	inet_diag_unregister(&udp_diag_handler);
	goto out;
}

static void __exit udp_diag_exit(void)
{
	inet_diag_unregister(&udplite_diag_handler);
	inet_diag_unregister(&udp_diag_handler);
}

module_init(udp_diag_init);
module_exit(udp_diag_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-17 /* AF_INET - IPPROTO_UDP */);
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-136 /* AF_INET - IPPROTO_UDPLITE */);