#include <linux/module.h>
#include <linux/sock_diag.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/packet_diag.h>
#include <linux/percpu.h>
#include <net/net_namespace.h>
#include <net/sock.h>

#include "internal.h"

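/* Report the socket's basic state (bound ifindex, ring version, reserve,
 * copy threshold, timestamping mode and the PDI_* option flags) as a
 * PACKET_DIAG_INFO attribute.
 */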
static int pdiag_put_info(const struct packet_sock *po, struct sk_buff *nlskb)
{
	struct packet_diag_info pinfo;

	pinfo.pdi_index = po->ifindex;
	pinfo.pdi_version = po->tp_version;
	pinfo.pdi_reserve = po->tp_reserve;
	pinfo.pdi_copy_thresh = po->copy_thresh;
	pinfo.pdi_tstamp = po->tp_tstamp;

	pinfo.pdi_flags = 0;
	if (po->running)
		pinfo.pdi_flags |= PDI_RUNNING;
	if (po->auxdata)
		pinfo.pdi_flags |= PDI_AUXDATA;
	if (po->origdev)
		pinfo.pdi_flags |= PDI_ORIGDEV;
	if (po->has_vnet_hdr)
		pinfo.pdi_flags |= PDI_VNETHDR;
	if (po->tp_loss)
		pinfo.pdi_flags |= PDI_LOSS;

	return nla_put(nlskb, PACKET_DIAG_INFO, sizeof(pinfo), &pinfo);
}

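/* Walk the socket's multicast membership list under the rtnl lock and
 * emit one packet_diag_mclist record per entry, nested inside a
 * PACKET_DIAG_MCLIST attribute.
 */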
static int pdiag_put_mclist(const struct packet_sock *po, struct sk_buff *nlskb)
{
	struct nlattr *mca;
	struct packet_mclist *ml;

	mca = nla_nest_start(nlskb, PACKET_DIAG_MCLIST);
	if (!mca)
		return -EMSGSIZE;

	rtnl_lock();
	for (ml = po->mclist; ml; ml = ml->next) {
		struct packet_diag_mclist *dml;

		dml = nla_reserve_nohdr(nlskb, sizeof(*dml));
		if (!dml) {
			rtnl_unlock();
			nla_nest_cancel(nlskb, mca);
			return -EMSGSIZE;
		}

		dml->pdmc_index = ml->ifindex;
		dml->pdmc_type = ml->type;
		dml->pdmc_alen = ml->alen;
		dml->pdmc_count = ml->count;
		BUILD_BUG_ON(sizeof(dml->pdmc_addr) != sizeof(ml->addr));
		memcpy(dml->pdmc_addr, ml->addr, sizeof(ml->addr));
	}

	rtnl_unlock();
	nla_nest_end(nlskb, mca);

	return 0;
}

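/* Describe one rx/tx ring: block/frame geometry plus the TPACKET_V3
 * block-retire parameters.  Nothing is emitted if the ring is not set up,
 * and the TX ring attribute is skipped for TPACKET_V3.
 */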
static int pdiag_put_ring(struct packet_ring_buffer *ring, int ver, int nl_type,
			  struct sk_buff *nlskb)
{
	struct packet_diag_ring pdr;

	if (!ring->pg_vec || ((ver > TPACKET_V2) &&
			      (nl_type == PACKET_DIAG_TX_RING)))
		return 0;

	pdr.pdr_block_size = ring->pg_vec_pages << PAGE_SHIFT;
	pdr.pdr_block_nr = ring->pg_vec_len;
	pdr.pdr_frame_size = ring->frame_size;
	pdr.pdr_frame_nr = ring->frame_max + 1;

	if (ver > TPACKET_V2) {
		pdr.pdr_retire_tmo = ring->prb_bdqc.retire_blk_tov;
		pdr.pdr_sizeof_priv = ring->prb_bdqc.blk_sizeof_priv;
		pdr.pdr_features = ring->prb_bdqc.feature_req_word;
	} else {
		pdr.pdr_retire_tmo = 0;
		pdr.pdr_sizeof_priv = 0;
		pdr.pdr_features = 0;
	}

	return nla_put(nlskb, nl_type, sizeof(pdr), &pdr);
}

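/* Report the RX and TX ring configuration under pg_vec_lock so the rings
 * cannot change while they are being described.
 */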
static int pdiag_put_rings_cfg(struct packet_sock *po, struct sk_buff *skb)
{
	int ret;

	mutex_lock(&po->pg_vec_lock);
	ret = pdiag_put_ring(&po->rx_ring, po->tp_version,
			     PACKET_DIAG_RX_RING, skb);
	if (!ret)
		ret = pdiag_put_ring(&po->tx_ring, po->tp_version,
				     PACKET_DIAG_TX_RING, skb);
	mutex_unlock(&po->pg_vec_lock);

	return ret;
}

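/* If the socket is part of a fanout group, report the group id and mode
 * packed into one u32 (id in the low 16 bits, type in the upper 16).
 */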
static int pdiag_put_fanout(struct packet_sock *po, struct sk_buff *nlskb)
{
	int ret = 0;

	mutex_lock(&fanout_mutex);
	if (po->fanout) {
		u32 val;

		val = (u32)po->fanout->id | ((u32)po->fanout->type << 16);
		ret = nla_put_u32(nlskb, PACKET_DIAG_FANOUT, val);
	}
	mutex_unlock(&fanout_mutex);

	return ret;
}

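/* Build one packet_diag_msg for @sk plus the optional attributes selected
 * by req->pdiag_show.  Socket filter info is only included when the
 * requester is allowed to see it (@may_report_filterinfo).
 */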
static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
			struct packet_diag_req *req,
			bool may_report_filterinfo,
			struct user_namespace *user_ns,
			u32 portid, u32 seq, u32 flags, int sk_ino)
{
	struct nlmsghdr *nlh;
	struct packet_diag_msg *rp;
	struct packet_sock *po = pkt_sk(sk);

	nlh = nlmsg_put(skb, portid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rp), flags);
	if (!nlh)
		return -EMSGSIZE;

	rp = nlmsg_data(nlh);
	rp->pdiag_family = AF_PACKET;
	rp->pdiag_type = sk->sk_type;
	rp->pdiag_num = ntohs(po->num);
	rp->pdiag_ino = sk_ino;
	sock_diag_save_cookie(sk, rp->pdiag_cookie);

	if ((req->pdiag_show & PACKET_SHOW_INFO) &&
	    pdiag_put_info(po, skb))
		goto out_nlmsg_trim;

	if ((req->pdiag_show & PACKET_SHOW_INFO) &&
	    nla_put_u32(skb, PACKET_DIAG_UID,
			from_kuid_munged(user_ns, sock_i_uid(sk))))
		goto out_nlmsg_trim;

	if ((req->pdiag_show & PACKET_SHOW_MCLIST) &&
	    pdiag_put_mclist(po, skb))
		goto out_nlmsg_trim;

	if ((req->pdiag_show & PACKET_SHOW_RING_CFG) &&
	    pdiag_put_rings_cfg(po, skb))
		goto out_nlmsg_trim;

	if ((req->pdiag_show & PACKET_SHOW_FANOUT) &&
	    pdiag_put_fanout(po, skb))
		goto out_nlmsg_trim;

	if ((req->pdiag_show & PACKET_SHOW_MEMINFO) &&
	    sock_diag_put_meminfo(sk, skb, PACKET_DIAG_MEMINFO))
		goto out_nlmsg_trim;

	if ((req->pdiag_show & PACKET_SHOW_FILTER) &&
	    sock_diag_put_filterinfo(may_report_filterinfo, sk, skb,
				     PACKET_DIAG_FILTER))
		goto out_nlmsg_trim;

	return nlmsg_end(skb, nlh);

out_nlmsg_trim:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

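/* Dump every AF_PACKET socket in the requesting namespace, resuming from
 * cb->args[0] across netlink dump passes.  CAP_NET_ADMIN in the netns
 * decides whether filter info may be reported.
 */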
static int packet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int num = 0, s_num = cb->args[0];
	struct packet_diag_req *req;
	struct net *net;
	struct sock *sk;
	bool may_report_filterinfo;

	net = sock_net(skb->sk);
	req = nlmsg_data(cb->nlh);
	may_report_filterinfo = ns_capable(net->user_ns, CAP_NET_ADMIN);

	mutex_lock(&net->packet.sklist_lock);
	sk_for_each(sk, &net->packet.sklist) {
		if (!net_eq(sock_net(sk), net))
			continue;
		if (num < s_num)
			goto next;

		if (sk_diag_fill(sk, skb, req,
				 may_report_filterinfo,
				 sk_user_ns(NETLINK_CB(cb->skb).sk),
				 NETLINK_CB(cb->skb).portid,
				 cb->nlh->nlmsg_seq, NLM_F_MULTI,
				 sock_i_ino(sk)) < 0)
			goto done;
next:
		num++;
	}
done:
	mutex_unlock(&net->packet.sklist_lock);
	cb->args[0] = num;

	return skb->len;
}

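/* sock_diag entry point: validate the request header and start a netlink
 * dump.  Only NLM_F_DUMP requests are handled, and sdiag_protocol must be
 * zero since per-protocol filtering is not supported yet.
 */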
static int packet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
{
	int hdrlen = sizeof(struct packet_diag_req);
	struct net *net = sock_net(skb->sk);
	struct packet_diag_req *req;

	if (nlmsg_len(h) < hdrlen)
		return -EINVAL;

	req = nlmsg_data(h);
	/* Make it possible to support protocol filtering later */
	if (req->sdiag_protocol)
		return -EINVAL;

	if (h->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = packet_diag_dump,
		};
		return netlink_dump_start(net->diag_nlsk, skb, h, &c);
	} else
		return -EOPNOTSUPP;
}

static const struct sock_diag_handler packet_diag_handler = {
	.family = AF_PACKET,
	.dump = packet_diag_handler_dump,
};

static int __init packet_diag_init(void)
{
	return sock_diag_register(&packet_diag_handler);
}

static void __exit packet_diag_exit(void)
{
	sock_diag_unregister(&packet_diag_handler);
}

module_init(packet_diag_init);
module_exit(packet_diag_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 17 /* AF_PACKET */);