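/* SOCK_DIAG support for SCTP: expose endpoint and association state to
 * userspace through the inet_diag netlink interface (IPPROTO_SCTP).
 */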
#include <linux/module.h>
#include <linux/inet_diag.h>
#include <linux/sock_diag.h>
#include <net/sctp/sctp.h>

static void sctp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
			       void *info);

/* define some functions to make asoc/ep fill look clean */
static void inet_diag_msg_sctpasoc_fill(struct inet_diag_msg *r,
					struct sock *sk,
					struct sctp_association *asoc)
{
	union sctp_addr laddr, paddr;
	struct dst_entry *dst;
	struct timer_list *t3_rtx = &asoc->peer.primary_path->T3_rtx_timer;

	laddr = list_entry(asoc->base.bind_addr.address_list.next,
			   struct sctp_sockaddr_entry, list)->a;
	paddr = asoc->peer.primary_path->ipaddr;
	dst = asoc->peer.primary_path->dst;

	r->idiag_family = sk->sk_family;
	r->id.idiag_sport = htons(asoc->base.bind_addr.port);
	r->id.idiag_dport = htons(asoc->peer.port);
	r->id.idiag_if = dst ? dst->dev->ifindex : 0;
	sock_diag_save_cookie(sk, r->id.idiag_cookie);

#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6) {
		*(struct in6_addr *)r->id.idiag_src = laddr.v6.sin6_addr;
		*(struct in6_addr *)r->id.idiag_dst = paddr.v6.sin6_addr;
	} else
#endif
	{
		memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
		memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));

		r->id.idiag_src[0] = laddr.v4.sin_addr.s_addr;
		r->id.idiag_dst[0] = paddr.v4.sin_addr.s_addr;
	}

	r->idiag_state = asoc->state;
	if (timer_pending(t3_rtx)) {
		r->idiag_timer = SCTP_EVENT_TIMEOUT_T3_RTX;
		r->idiag_retrans = asoc->rtx_data_chunks;
		r->idiag_expires = jiffies_to_msecs(t3_rtx->expires - jiffies);
	} else {
		r->idiag_timer = 0;
		r->idiag_retrans = 0;
		r->idiag_expires = 0;
	}
}

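/* Pack every locally bound address into an INET_DIAG_LOCALS attribute,
 * one sockaddr_storage-sized slot per address.  Returns -EMSGSIZE if
 * the attribute cannot be reserved in the skb.
 */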
static int inet_diag_msg_sctpladdrs_fill(struct sk_buff *skb,
					 struct list_head *address_list)
{
	struct sctp_sockaddr_entry *laddr;
	int addrlen = sizeof(struct sockaddr_storage);
	int addrcnt = 0;
	struct nlattr *attr;
	void *info = NULL;

	list_for_each_entry_rcu(laddr, address_list, list)
		addrcnt++;

	attr = nla_reserve(skb, INET_DIAG_LOCALS, addrlen * addrcnt);
	if (!attr)
		return -EMSGSIZE;

	info = nla_data(attr);
	list_for_each_entry_rcu(laddr, address_list, list) {
		memcpy(info, &laddr->a, addrlen);
		info += addrlen;
	}

	return 0;
}

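/* Pack all remote transport addresses of the association into an
 * INET_DIAG_PEERS attribute, one sockaddr_storage-sized slot per peer
 * transport.  Returns -EMSGSIZE if the attribute cannot be reserved.
 */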
static int inet_diag_msg_sctpaddrs_fill(struct sk_buff *skb,
					struct sctp_association *asoc)
{
	int addrlen = sizeof(struct sockaddr_storage);
	struct sctp_transport *from;
	struct nlattr *attr;
	void *info = NULL;

	attr = nla_reserve(skb, INET_DIAG_PEERS,
			   addrlen * asoc->peer.transport_count);
	if (!attr)
		return -EMSGSIZE;

	info = nla_data(attr);
	list_for_each_entry(from, &asoc->peer.transport_addr_list,
			    transports) {
		memcpy(info, &from->ipaddr, addrlen);
		info += addrlen;
	}

	return 0;
}

/* sctp asoc/ep fill */
static int inet_sctp_diag_fill(struct sock *sk, struct sctp_association *asoc,
			       struct sk_buff *skb,
			       const struct inet_diag_req_v2 *req,
			       struct user_namespace *user_ns,
			       int portid, u32 seq, u16 nlmsg_flags,
			       const struct nlmsghdr *unlh)
{
	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
	struct list_head *addr_list;
	struct inet_diag_msg *r;
	struct nlmsghdr *nlh;
	int ext = req->idiag_ext;
	struct sctp_infox infox;
	void *info = NULL;

	nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
			nlmsg_flags);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	BUG_ON(!sk_fullsock(sk));

	if (asoc) {
		inet_diag_msg_sctpasoc_fill(r, sk, asoc);
	} else {
		inet_diag_msg_common_fill(r, sk);
		r->idiag_state = sk->sk_state;
		r->idiag_timer = 0;
		r->idiag_retrans = 0;
	}

	if (inet_diag_msg_attrs_fill(sk, skb, r, ext, user_ns))
		goto errout;

	if (ext & (1 << (INET_DIAG_SKMEMINFO - 1))) {
		u32 mem[SK_MEMINFO_VARS];
		int amt;

		if (asoc && asoc->ep->sndbuf_policy)
			amt = asoc->sndbuf_used;
		else
			amt = sk_wmem_alloc_get(sk);
		mem[SK_MEMINFO_WMEM_ALLOC] = amt;
		if (asoc && asoc->ep->rcvbuf_policy)
			amt = atomic_read(&asoc->rmem_alloc);
		else
			amt = sk_rmem_alloc_get(sk);
		mem[SK_MEMINFO_RMEM_ALLOC] = amt;
		mem[SK_MEMINFO_RCVBUF] = sk->sk_rcvbuf;
		mem[SK_MEMINFO_SNDBUF] = sk->sk_sndbuf;
		mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc;
		mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued;
		mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
		mem[SK_MEMINFO_BACKLOG] = sk->sk_backlog.len;
		mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops);

		if (nla_put(skb, INET_DIAG_SKMEMINFO, sizeof(mem), &mem) < 0)
			goto errout;
	}

	if (ext & (1 << (INET_DIAG_INFO - 1))) {
		struct nlattr *attr;

		attr = nla_reserve_64bit(skb, INET_DIAG_INFO,
					 sizeof(struct sctp_info),
					 INET_DIAG_PAD);
		if (!attr)
			goto errout;

		info = nla_data(attr);
	}
	infox.sctpinfo = (struct sctp_info *)info;
	infox.asoc = asoc;
	sctp_diag_get_info(sk, r, &infox);

	addr_list = asoc ? &asoc->base.bind_addr.address_list
			 : &ep->base.bind_addr.address_list;
	if (inet_diag_msg_sctpladdrs_fill(skb, addr_list))
		goto errout;

	if (asoc && (ext & (1 << (INET_DIAG_CONG - 1))))
		if (nla_put_string(skb, INET_DIAG_CONG, "reno") < 0)
			goto errout;

	if (asoc && inet_diag_msg_sctpaddrs_fill(skb, asoc))
		goto errout;

	nlmsg_end(skb, nlh);
	return 0;

errout:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

/* callback and param */
struct sctp_comm_param {
	struct sk_buff *skb;
	struct netlink_callback *cb;
	const struct inet_diag_req_v2 *r;
	const struct nlmsghdr *nlh;
};

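/* Estimate the netlink message size needed to dump one association:
 * the fixed inet_diag_msg plus the optional attributes and both
 * address lists, with some slack.
 */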
static size_t inet_assoc_attr_size(struct sctp_association *asoc)
{
	int addrlen = sizeof(struct sockaddr_storage);
	int addrcnt = 0;
	struct sctp_sockaddr_entry *laddr;

	list_for_each_entry_rcu(laddr, &asoc->base.bind_addr.address_list,
				list)
		addrcnt++;

	return nla_total_size(sizeof(struct sctp_info))
		+ nla_total_size(1) /* INET_DIAG_SHUTDOWN */
		+ nla_total_size(1) /* INET_DIAG_TOS */
		+ nla_total_size(1) /* INET_DIAG_TCLASS */
		+ nla_total_size(addrlen * asoc->peer.transport_count)
		+ nla_total_size(addrlen * addrcnt)
		+ nla_total_size(sizeof(struct inet_diag_meminfo))
		+ nla_total_size(sizeof(struct inet_diag_msg))
		+ 64;
}

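/* sctp_transport_lookup_process() callback for the "dump one" request:
 * verify the socket cookie, then build a reply for the matching
 * association and unicast it back to the requester.  The owning sock
 * is re-checked under lock_sock() in case the association moved to a
 * different socket in the meantime.
 */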
static int sctp_tsp_dump_one(struct sctp_transport *tsp, void *p)
{
	struct sctp_association *assoc = tsp->asoc;
	struct sock *sk = tsp->asoc->base.sk;
	struct sctp_comm_param *commp = p;
	struct sk_buff *in_skb = commp->skb;
	const struct inet_diag_req_v2 *req = commp->r;
	const struct nlmsghdr *nlh = commp->nlh;
	struct net *net = sock_net(in_skb->sk);
	struct sk_buff *rep;
	int err;

	err = sock_diag_check_cookie(sk, req->id.idiag_cookie);
	if (err)
		goto out;

	err = -ENOMEM;
	rep = nlmsg_new(inet_assoc_attr_size(assoc), GFP_KERNEL);
	if (!rep)
		goto out;

	lock_sock(sk);
	if (sk != assoc->base.sk) {
		release_sock(sk);
		sk = assoc->base.sk;
		lock_sock(sk);
	}
	err = inet_sctp_diag_fill(sk, assoc, rep, req,
				  sk_user_ns(NETLINK_CB(in_skb).sk),
				  NETLINK_CB(in_skb).portid,
				  nlh->nlmsg_seq, 0, nlh);
	release_sock(sk);
	if (err < 0) {
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(rep);
		goto out;
	}

	err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid,
			      MSG_DONTWAIT);
	if (err > 0)
		err = 0;
out:
	return err;
}

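/* Dump one sock found via the transport hashtable: emit the ep-level
 * record once, then one record per association, honouring the port
 * filters and the resume position kept in cb->args[].  A nonzero
 * return means the skb is full, so the caller stops and resumes later.
 * The reference taken by sctp_get_sock() is dropped here.
 */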
static int sctp_sock_dump(struct sock *sk, void *p)
{
	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
	struct sctp_comm_param *commp = p;
	struct sk_buff *skb = commp->skb;
	struct netlink_callback *cb = commp->cb;
	const struct inet_diag_req_v2 *r = commp->r;
	struct sctp_association *assoc;
	int err = 0;

	lock_sock(sk);
	list_for_each_entry(assoc, &ep->asocs, asocs) {
		if (cb->args[4] < cb->args[1])
			goto next;

		if (r->id.idiag_sport != htons(assoc->base.bind_addr.port) &&
		    r->id.idiag_sport)
			goto next;
		if (r->id.idiag_dport != htons(assoc->peer.port) &&
		    r->id.idiag_dport)
			goto next;

		if (!cb->args[3] &&
		    inet_sctp_diag_fill(sk, NULL, skb, r,
					sk_user_ns(NETLINK_CB(cb->skb).sk),
					NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq,
					NLM_F_MULTI, cb->nlh) < 0) {
			cb->args[3] = 1;
			err = 1;
			goto release;
		}
		cb->args[3] = 1;

		if (inet_sctp_diag_fill(sk, assoc, skb, r,
					sk_user_ns(NETLINK_CB(cb->skb).sk),
					NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq, 0, cb->nlh) < 0) {
			err = 1;
			goto release;
		}
next:
		cb->args[4]++;
	}
	cb->args[1] = 0;
	cb->args[2]++;
	cb->args[3] = 0;
	cb->args[4] = 0;
release:
	release_sock(sk);
	sock_put(sk);
	return err;
}

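/* sctp_for_each_transport() callback: grab a reference to the sock
 * owning the transport's endpoint and park it in cb->args[5], acting
 * only on the ep's first association so each ep is picked up once.
 * Returning 1 stops the walk; cb->args[2] advances the resume position
 * past transports that were skipped.
 */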
static int sctp_get_sock(struct sctp_transport *tsp, void *p)
{
	struct sctp_endpoint *ep = tsp->asoc->ep;
	struct sctp_comm_param *commp = p;
	struct sock *sk = ep->base.sk;
	struct netlink_callback *cb = commp->cb;
	const struct inet_diag_req_v2 *r = commp->r;
	struct sctp_association *assoc =
		list_entry(ep->asocs.next, struct sctp_association, asocs);

	/* only find the ep once when walking the transports, via its first asoc */
	if (tsp->asoc != assoc)
		goto out;

	if (r->sdiag_family != AF_UNSPEC && sk->sk_family != r->sdiag_family)
		goto out;

	sock_hold(sk);
	cb->args[5] = (long)sk;

	return 1;

out:
	cb->args[2]++;
	return 0;
}

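/* sctp_for_each_endpoint() callback: dump one endpoint from the ep
 * hashtable if it matches the request's netns, family and port
 * filters.  A nonzero return tells the walker that the skb is full.
 */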
static int sctp_ep_dump(struct sctp_endpoint *ep, void *p)
{
	struct sctp_comm_param *commp = p;
	struct sock *sk = ep->base.sk;
	struct sk_buff *skb = commp->skb;
	struct netlink_callback *cb = commp->cb;
	const struct inet_diag_req_v2 *r = commp->r;
	struct net *net = sock_net(skb->sk);
	struct inet_sock *inet = inet_sk(sk);
	int err = 0;

	if (!net_eq(sock_net(sk), net))
		goto out;

	if (cb->args[4] < cb->args[1])
		goto next;

	if (!(r->idiag_states & TCPF_LISTEN) && !list_empty(&ep->asocs))
		goto next;

	if (r->sdiag_family != AF_UNSPEC &&
	    sk->sk_family != r->sdiag_family)
		goto next;

	if (r->id.idiag_sport != inet->inet_sport &&
	    r->id.idiag_sport)
		goto next;

	if (r->id.idiag_dport != inet->inet_dport &&
	    r->id.idiag_dport)
		goto next;

	if (inet_sctp_diag_fill(sk, NULL, skb, r,
				sk_user_ns(NETLINK_CB(cb->skb).sk),
				NETLINK_CB(cb->skb).portid,
				cb->nlh->nlmsg_seq, NLM_F_MULTI,
				cb->nlh) < 0) {
		err = 2;
		goto out;
	}
next:
	cb->args[4]++;
out:
	return err;
}

/* define the functions for sctp_diag_handler */
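/* ->idiag_get_info handler: report rqueue/wqueue from the association
 * when one is given, otherwise from the listening sock's accept-queue
 * counters, and fill struct sctp_info if a buffer was reserved.
 */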
static void sctp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
			       void *info)
{
	struct sctp_infox *infox = (struct sctp_infox *)info;

	if (infox->asoc) {
		r->idiag_rqueue = atomic_read(&infox->asoc->rmem_alloc);
		r->idiag_wqueue = infox->asoc->sndbuf_used;
	} else {
		r->idiag_rqueue = sk->sk_ack_backlog;
		r->idiag_wqueue = sk->sk_max_ack_backlog;
	}
	if (infox->sctpinfo)
		sctp_get_sctp_info(sk, infox->asoc, infox->sctpinfo);
}

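/* ->dump_one handler: rebuild the local and peer addresses from the
 * request and look up the transport so the matching association can
 * be dumped by sctp_tsp_dump_one().
 */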
static int sctp_diag_dump_one(struct sk_buff *in_skb,
			      const struct nlmsghdr *nlh,
			      const struct inet_diag_req_v2 *req)
{
	struct net *net = sock_net(in_skb->sk);
	union sctp_addr laddr, paddr;
	struct sctp_comm_param commp = {
		.skb = in_skb,
		.r = req,
		.nlh = nlh,
	};

	if (req->sdiag_family == AF_INET) {
		laddr.v4.sin_port = req->id.idiag_sport;
		laddr.v4.sin_addr.s_addr = req->id.idiag_src[0];
		laddr.v4.sin_family = AF_INET;

		paddr.v4.sin_port = req->id.idiag_dport;
		paddr.v4.sin_addr.s_addr = req->id.idiag_dst[0];
		paddr.v4.sin_family = AF_INET;
	} else {
		laddr.v6.sin6_port = req->id.idiag_sport;
		memcpy(&laddr.v6.sin6_addr, req->id.idiag_src,
		       sizeof(laddr.v6.sin6_addr));
		laddr.v6.sin6_family = AF_INET6;

		paddr.v6.sin6_port = req->id.idiag_dport;
		memcpy(&paddr.v6.sin6_addr, req->id.idiag_dst,
		       sizeof(paddr.v6.sin6_addr));
		paddr.v6.sin6_family = AF_INET6;
	}

	return sctp_transport_lookup_process(sctp_tsp_dump_one,
					     net, &laddr, &paddr, &commp);
}

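/* ->dump handler: first walk the endpoint hashtable when listening
 * socks are requested, then walk the transport hashtable, picking up
 * the owning sock with sctp_get_sock() and dumping its associations
 * with sctp_sock_dump().  cb->args[] keeps the position between dump
 * calls.
 */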
static void sctp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
			   const struct inet_diag_req_v2 *r, struct nlattr *bc)
{
	u32 idiag_states = r->idiag_states;
	struct net *net = sock_net(skb->sk);
	struct sctp_comm_param commp = {
		.skb = skb,
		.cb = cb,
		.r = r,
	};

	/* eps hashtable dump
	 * args:
	 * 0 : whether the listening socks have been dumped yet
	 * 1 : the sock position reached by the previous traversal
	 * 4 : temporary cursor while traversing the list
	 */
	if (cb->args[0] == 0) {
		if (!(idiag_states & TCPF_LISTEN))
			goto skip;
		if (sctp_for_each_endpoint(sctp_ep_dump, &commp))
			goto done;
skip:
		cb->args[0] = 1;
		cb->args[1] = 0;
		cb->args[4] = 0;
	}

	/* asocs by transport hashtable dump
	 * args:
	 * 1 : the assoc position reached by the previous traversal
	 * 2 : the transport position reached by the previous traversal
	 * 3 : whether the ep info of the current asoc has been dumped
	 * 4 : temporary cursor while traversing the list
	 * 5 : the sk obtained from traversing the transport list
	 */
	if (!(idiag_states & ~(TCPF_LISTEN | TCPF_CLOSE)))
		goto done;

next:
	cb->args[5] = 0;
	sctp_for_each_transport(sctp_get_sock, net, cb->args[2], &commp);

	if (cb->args[5] && !sctp_sock_dump((struct sock *)cb->args[5], &commp))
		goto next;

done:
	cb->args[1] = cb->args[4];
	cb->args[4] = 0;
}

static const struct inet_diag_handler sctp_diag_handler = {
	.dump		 = sctp_diag_dump,
	.dump_one	 = sctp_diag_dump_one,
	.idiag_get_info	 = sctp_diag_get_info,
	.idiag_type	 = IPPROTO_SCTP,
	.idiag_info_size = sizeof(struct sctp_info),
};

static int __init sctp_diag_init(void)
{
	return inet_diag_register(&sctp_diag_handler);
}

static void __exit sctp_diag_exit(void)
{
	inet_diag_unregister(&sctp_diag_handler);
}

module_init(sctp_diag_init);
module_exit(sctp_diag_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-132);