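/* SCTP support for the sock_diag netlink interface.
 *
 * Registers an inet_diag handler for IPPROTO_SCTP so that userspace diag
 * tools (e.g. ss) can query endpoint and association state.
 */
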
#include <linux/module.h>
#include <linux/inet_diag.h>
#include <linux/sock_diag.h>
#include <net/sctp/sctp.h>

extern void inet_diag_msg_common_fill(struct inet_diag_msg *r,
				      struct sock *sk);
extern int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
				    struct inet_diag_msg *r, int ext,
				    struct user_namespace *user_ns);

static void sctp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
			       void *info);

/* define some functions to make asoc/ep fill look clean */
static void inet_diag_msg_sctpasoc_fill(struct inet_diag_msg *r,
					struct sock *sk,
					struct sctp_association *asoc)
{
	union sctp_addr laddr, paddr;
	struct dst_entry *dst;

	laddr = list_entry(asoc->base.bind_addr.address_list.next,
			   struct sctp_sockaddr_entry, list)->a;
	paddr = asoc->peer.primary_path->ipaddr;
	dst = asoc->peer.primary_path->dst;

	r->idiag_family = sk->sk_family;
	r->id.idiag_sport = htons(asoc->base.bind_addr.port);
	r->id.idiag_dport = htons(asoc->peer.port);
	r->id.idiag_if = dst ? dst->dev->ifindex : 0;
	sock_diag_save_cookie(sk, r->id.idiag_cookie);

#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6) {
		*(struct in6_addr *)r->id.idiag_src = laddr.v6.sin6_addr;
		*(struct in6_addr *)r->id.idiag_dst = paddr.v6.sin6_addr;
	} else
#endif
	{
		memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
		memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));

		r->id.idiag_src[0] = laddr.v4.sin_addr.s_addr;
		r->id.idiag_dst[0] = paddr.v4.sin_addr.s_addr;
	}

	r->idiag_state = asoc->state;
	r->idiag_timer = SCTP_EVENT_TIMEOUT_T3_RTX;
	r->idiag_retrans = asoc->rtx_data_chunks;
	r->idiag_expires = jiffies_to_msecs(
		asoc->timeouts[SCTP_EVENT_TIMEOUT_T3_RTX] - jiffies);
}

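/* Pack every locally bound address into a single INET_DIAG_LOCALS attribute;
 * each address occupies one sockaddr_storage-sized slot.
 */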
static int inet_diag_msg_sctpladdrs_fill(struct sk_buff *skb,
					 struct list_head *address_list)
{
	struct sctp_sockaddr_entry *laddr;
	int addrlen = sizeof(struct sockaddr_storage);
	int addrcnt = 0;
	struct nlattr *attr;
	void *info = NULL;

	list_for_each_entry_rcu(laddr, address_list, list)
		addrcnt++;

	attr = nla_reserve(skb, INET_DIAG_LOCALS, addrlen * addrcnt);
	if (!attr)
		return -EMSGSIZE;

	info = nla_data(attr);
	list_for_each_entry_rcu(laddr, address_list, list) {
		/* the address union is smaller than the sockaddr_storage
		 * slot; copy only the union and zero the remainder
		 */
		memcpy(info, &laddr->a, sizeof(laddr->a));
		memset(info + sizeof(laddr->a), 0,
		       addrlen - sizeof(laddr->a));
		info += addrlen;
	}

	return 0;
}

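/* Pack all peer transport addresses into a single INET_DIAG_PEERS attribute,
 * one sockaddr_storage-sized slot per transport.
 */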
static int inet_diag_msg_sctpaddrs_fill(struct sk_buff *skb,
					struct sctp_association *asoc)
{
	int addrlen = sizeof(struct sockaddr_storage);
	struct sctp_transport *from;
	struct nlattr *attr;
	void *info = NULL;

	attr = nla_reserve(skb, INET_DIAG_PEERS,
			   addrlen * asoc->peer.transport_count);
	if (!attr)
		return -EMSGSIZE;

	info = nla_data(attr);
	list_for_each_entry(from, &asoc->peer.transport_addr_list,
			    transports) {
		/* same as above: avoid reading past the sctp_addr union */
		memcpy(info, &from->ipaddr, sizeof(from->ipaddr));
		memset(info + sizeof(from->ipaddr), 0,
		       addrlen - sizeof(from->ipaddr));
		info += addrlen;
	}

	return 0;
}

/* sctp asoc/ep fill: write one inet_diag_msg (plus any requested attributes)
 * for either an association (asoc != NULL) or a bare endpoint.
 */
static int inet_sctp_diag_fill(struct sock *sk, struct sctp_association *asoc,
			       struct sk_buff *skb,
			       const struct inet_diag_req_v2 *req,
			       struct user_namespace *user_ns,
			       int portid, u32 seq, u16 nlmsg_flags,
			       const struct nlmsghdr *unlh)
{
	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
	struct list_head *addr_list;
	struct inet_diag_msg *r;
	struct nlmsghdr *nlh;
	int ext = req->idiag_ext;
	struct sctp_infox infox;
	void *info = NULL;

	nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
			nlmsg_flags);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	BUG_ON(!sk_fullsock(sk));

	if (asoc) {
		inet_diag_msg_sctpasoc_fill(r, sk, asoc);
	} else {
		inet_diag_msg_common_fill(r, sk);
		r->idiag_state = sk->sk_state;
		r->idiag_timer = 0;
		r->idiag_retrans = 0;
	}

	if (inet_diag_msg_attrs_fill(sk, skb, r, ext, user_ns))
		goto errout;

	if (ext & (1 << (INET_DIAG_SKMEMINFO - 1))) {
		u32 mem[SK_MEMINFO_VARS];
		int amt;

		if (asoc && asoc->ep->sndbuf_policy)
			amt = asoc->sndbuf_used;
		else
			amt = sk_wmem_alloc_get(sk);
		mem[SK_MEMINFO_WMEM_ALLOC] = amt;
		mem[SK_MEMINFO_RMEM_ALLOC] = sk_rmem_alloc_get(sk);
		mem[SK_MEMINFO_RCVBUF] = sk->sk_rcvbuf;
		mem[SK_MEMINFO_SNDBUF] = sk->sk_sndbuf;
		mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc;
		mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued;
		mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
		mem[SK_MEMINFO_BACKLOG] = sk->sk_backlog.len;
		mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops);

		if (nla_put(skb, INET_DIAG_SKMEMINFO, sizeof(mem), &mem) < 0)
			goto errout;
	}

	if (ext & (1 << (INET_DIAG_INFO - 1))) {
		struct nlattr *attr;

		attr = nla_reserve_64bit(skb, INET_DIAG_INFO,
					 sizeof(struct sctp_info),
					 INET_DIAG_PAD);
		if (!attr)
			goto errout;

		info = nla_data(attr);
	}
	infox.sctpinfo = (struct sctp_info *)info;
	infox.asoc = asoc;
	sctp_diag_get_info(sk, r, &infox);

	addr_list = asoc ? &asoc->base.bind_addr.address_list
			 : &ep->base.bind_addr.address_list;
	if (inet_diag_msg_sctpladdrs_fill(skb, addr_list))
		goto errout;

	if (asoc && (ext & (1 << (INET_DIAG_CONG - 1))))
		if (nla_put_string(skb, INET_DIAG_CONG, "reno") < 0)
			goto errout;

	if (asoc && inet_diag_msg_sctpaddrs_fill(skb, asoc))
		goto errout;

	nlmsg_end(skb, nlh);
	return 0;

errout:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

/* callback and param */
struct sctp_comm_param {
	struct sk_buff *skb;
	struct netlink_callback *cb;
	const struct inet_diag_req_v2 *r;
	const struct nlmsghdr *nlh;
};

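/* Upper-bound estimate of the netlink reply size needed to dump one
 * association; used to size the skb allocated on the dump_one path.
 */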
static size_t inet_assoc_attr_size(struct sctp_association *asoc)
{
	int addrlen = sizeof(struct sockaddr_storage);
	int addrcnt = 0;
	struct sctp_sockaddr_entry *laddr;

	list_for_each_entry_rcu(laddr, &asoc->base.bind_addr.address_list,
				list)
		addrcnt++;

	return	  nla_total_size(sizeof(struct sctp_info))
		+ nla_total_size(1) /* INET_DIAG_SHUTDOWN */
		+ nla_total_size(1) /* INET_DIAG_TOS */
		+ nla_total_size(1) /* INET_DIAG_TCLASS */
		+ nla_total_size(addrlen * asoc->peer.transport_count)
		+ nla_total_size(addrlen * addrcnt)
		+ nla_total_size(sizeof(struct inet_diag_meminfo))
		+ nla_total_size(sizeof(struct inet_diag_msg))
		+ 64;
}

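/* Callback for sctp_transport_lookup_process(): build the diag reply for the
 * matching association and unicast it back to the requesting socket.
 */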
static int sctp_tsp_dump_one(struct sctp_transport *tsp, void *p)
{
	struct sctp_association *assoc = tsp->asoc;
	struct sock *sk = tsp->asoc->base.sk;
	struct sctp_comm_param *commp = p;
	struct sk_buff *in_skb = commp->skb;
	const struct inet_diag_req_v2 *req = commp->r;
	const struct nlmsghdr *nlh = commp->nlh;
	struct net *net = sock_net(in_skb->sk);
	struct sk_buff *rep;
	int err;

	err = sock_diag_check_cookie(sk, req->id.idiag_cookie);
	if (err)
		goto out;

	err = -ENOMEM;
	rep = nlmsg_new(inet_assoc_attr_size(assoc), GFP_KERNEL);
	if (!rep)
		goto out;

	lock_sock(sk);
	/* the asoc may have migrated to another sock (e.g. via peeloff)
	 * while we waited for the lock; if so, relock its current owner
	 */
	if (sk != assoc->base.sk) {
		release_sock(sk);
		sk = assoc->base.sk;
		lock_sock(sk);
	}
	err = inet_sctp_diag_fill(sk, assoc, rep, req,
				  sk_user_ns(NETLINK_CB(in_skb).sk),
				  NETLINK_CB(in_skb).portid,
				  nlh->nlmsg_seq, 0, nlh);
	release_sock(sk);
	if (err < 0) {
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(rep);
		goto out;
	}

	err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid,
			      MSG_DONTWAIT);
	if (err > 0)
		err = 0;
out:
	return err;
}

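/* Callback for sctp_for_each_transport(): when the transport belongs to its
 * endpoint's first asoc, dump the endpoint record and then every asoc on
 * that endpoint, using cb->args[] as the resume cursor.
 */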
static int sctp_tsp_dump(struct sctp_transport *tsp, void *p)
{
	struct sctp_endpoint *ep = tsp->asoc->ep;
	struct sctp_comm_param *commp = p;
	struct sock *sk = ep->base.sk;
	struct sk_buff *skb = commp->skb;
	struct netlink_callback *cb = commp->cb;
	const struct inet_diag_req_v2 *r = commp->r;
	struct sctp_association *assoc =
		list_entry(ep->asocs.next, struct sctp_association, asocs);
	int err = 0;

	/* handle each ep only once while walking the transports: only
	 * transports of the ep's first asoc get past this check
	 */
	if (tsp->asoc != assoc)
		goto out;

	if (r->sdiag_family != AF_UNSPEC && sk->sk_family != r->sdiag_family)
		goto out;

	lock_sock(sk);
	if (sk != assoc->base.sk)
		goto release;
	list_for_each_entry(assoc, &ep->asocs, asocs) {
		if (cb->args[4] < cb->args[1])
			goto next;

		if (r->id.idiag_sport != htons(assoc->base.bind_addr.port) &&
		    r->id.idiag_sport)
			goto next;
		if (r->id.idiag_dport != htons(assoc->peer.port) &&
		    r->id.idiag_dport)
			goto next;

		if (!cb->args[3] &&
		    inet_sctp_diag_fill(sk, NULL, skb, r,
					sk_user_ns(NETLINK_CB(cb->skb).sk),
					NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq,
					NLM_F_MULTI, cb->nlh) < 0) {
			cb->args[3] = 1;
			err = 2;
			goto release;
		}
		cb->args[3] = 1;

		if (inet_sctp_diag_fill(sk, assoc, skb, r,
					sk_user_ns(NETLINK_CB(cb->skb).sk),
					NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq, 0, cb->nlh) < 0) {
			err = 2;
			goto release;
		}
next:
		cb->args[4]++;
	}
	cb->args[1] = 0;
	cb->args[2]++;
	cb->args[3] = 0;
	cb->args[4] = 0;
release:
	release_sock(sk);
	return err;
out:
	cb->args[2]++;
	return err;
}

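/* Callback for sctp_for_each_endpoint(): emit one record for the endpoint's
 * socket if it matches the netns, family and port filters.
 */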
static int sctp_ep_dump(struct sctp_endpoint *ep, void *p)
{
	struct sctp_comm_param *commp = p;
	struct sock *sk = ep->base.sk;
	struct sk_buff *skb = commp->skb;
	struct netlink_callback *cb = commp->cb;
	const struct inet_diag_req_v2 *r = commp->r;
	struct net *net = sock_net(skb->sk);
	struct inet_sock *inet = inet_sk(sk);
	int err = 0;

	if (!net_eq(sock_net(sk), net))
		goto out;

	if (cb->args[4] < cb->args[1])
		goto next;

	if (r->sdiag_family != AF_UNSPEC &&
	    sk->sk_family != r->sdiag_family)
		goto next;

	if (r->id.idiag_sport != inet->inet_sport &&
	    r->id.idiag_sport)
		goto next;

	if (r->id.idiag_dport != inet->inet_dport &&
	    r->id.idiag_dport)
		goto next;

	if (inet_sctp_diag_fill(sk, NULL, skb, r,
				sk_user_ns(NETLINK_CB(cb->skb).sk),
				NETLINK_CB(cb->skb).portid,
				cb->nlh->nlmsg_seq, NLM_F_MULTI,
				cb->nlh) < 0) {
		err = 2;
		goto out;
	}
next:
	cb->args[4]++;
out:
	return err;
}

/* define the functions for sctp_diag_handler */
static void sctp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
			       void *info)
{
	struct sctp_infox *infox = (struct sctp_infox *)info;

	if (infox->asoc) {
		r->idiag_rqueue = atomic_read(&infox->asoc->rmem_alloc);
		r->idiag_wqueue = infox->asoc->sndbuf_used;
	} else {
		r->idiag_rqueue = sk->sk_ack_backlog;
		r->idiag_wqueue = sk->sk_max_ack_backlog;
	}
	if (infox->sctpinfo)
		sctp_get_sctp_info(sk, infox->asoc, infox->sctpinfo);
}

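/* .dump_one handler: rebuild the local/peer addresses from the request and
 * look up the matching transport to dump a single association.
 */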
static int sctp_diag_dump_one(struct sk_buff *in_skb,
			      const struct nlmsghdr *nlh,
			      const struct inet_diag_req_v2 *req)
{
	struct net *net = sock_net(in_skb->sk);
	union sctp_addr laddr, paddr;
	struct sctp_comm_param commp = {
		.skb = in_skb,
		.r = req,
		.nlh = nlh,
	};

	if (req->sdiag_family == AF_INET) {
		laddr.v4.sin_port = req->id.idiag_sport;
		laddr.v4.sin_addr.s_addr = req->id.idiag_src[0];
		laddr.v4.sin_family = AF_INET;

		paddr.v4.sin_port = req->id.idiag_dport;
		paddr.v4.sin_addr.s_addr = req->id.idiag_dst[0];
		paddr.v4.sin_family = AF_INET;
	} else {
		laddr.v6.sin6_port = req->id.idiag_sport;
		memcpy(&laddr.v6.sin6_addr, req->id.idiag_src,
		       sizeof(laddr.v6.sin6_addr));
		laddr.v6.sin6_family = AF_INET6;

		paddr.v6.sin6_port = req->id.idiag_dport;
		memcpy(&paddr.v6.sin6_addr, req->id.idiag_dst,
		       sizeof(paddr.v6.sin6_addr));
		paddr.v6.sin6_family = AF_INET6;
	}

	return sctp_transport_lookup_process(sctp_tsp_dump_one,
					     net, &laddr, &paddr, &commp);
}

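/* .dump handler: walk the endpoint hashtable for listening socks first, then
 * the transport hashtable for associations, resuming from cb->args[].
 */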
static void sctp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
			   const struct inet_diag_req_v2 *r, struct nlattr *bc)
{
	u32 idiag_states = r->idiag_states;
	struct net *net = sock_net(skb->sk);
	struct sctp_comm_param commp = {
		.skb = skb,
		.cb = cb,
		.r = r,
	};

	/* ep hashtable dump
	 * args:
	 * 0 : set once the listening socks have been dumped
	 * 1 : sock position to resume from in this pass
	 * 4 : running index while walking the list
	 */
	if (cb->args[0] == 0) {
		if (!(idiag_states & TCPF_LISTEN))
			goto skip;
		if (sctp_for_each_endpoint(sctp_ep_dump, &commp))
			goto done;
skip:
		cb->args[0] = 1;
		cb->args[1] = 0;
		cb->args[4] = 0;
	}

	/* asocs by transport hashtable dump
	 * args:
	 * 1 : asoc position to resume from in this pass
	 * 2 : transport position to resume from
	 * 3 : set once the ep info of the current asoc has been dumped
	 * 4 : running index while walking the list
	 */
	if (!(idiag_states & ~TCPF_LISTEN))
		goto done;
	sctp_for_each_transport(sctp_tsp_dump, net, cb->args[2], &commp);
done:
	cb->args[1] = cb->args[4];
	cb->args[4] = 0;
}

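/* Glue into the generic inet_diag framework: inet_diag dispatches
 * IPPROTO_SCTP requests to these handlers.
 */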
static const struct inet_diag_handler sctp_diag_handler = {
	.dump		 = sctp_diag_dump,
	.dump_one	 = sctp_diag_dump_one,
	.idiag_get_info	 = sctp_diag_get_info,
	.idiag_type	 = IPPROTO_SCTP,
	.idiag_info_size = sizeof(struct sctp_info),
};

static int __init sctp_diag_init(void)
{
	return inet_diag_register(&sctp_diag_handler);
}

static void __exit sctp_diag_exit(void)
{
	inet_diag_unregister(&sctp_diag_handler);
}

module_init(sctp_diag_init);
module_exit(sctp_diag_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-132);