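/* Diag/monitoring support for AF_PACKET sockets: answers SOCK_DIAG_BY_FAMILY
 * dump requests (struct packet_diag_req in, packet_diag_msg records plus
 * optional attributes out).
 */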
#include <linux/module.h>
#include <linux/sock_diag.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/packet_diag.h>
#include <net/net_namespace.h>
#include <net/sock.h>

#include "internal.h"

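/* Fill a PACKET_DIAG_INFO attribute with the socket's basic state:
 * bound device index, ring protocol version, reserve/copy thresholds,
 * timestamping mode and the PDI_* flag bits.
 */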
static int pdiag_put_info(const struct packet_sock *po, struct sk_buff *nlskb)
{
	struct packet_diag_info pinfo;

	pinfo.pdi_index = po->ifindex;
	pinfo.pdi_version = po->tp_version;
	pinfo.pdi_reserve = po->tp_reserve;
	pinfo.pdi_copy_thresh = po->copy_thresh;
	pinfo.pdi_tstamp = po->tp_tstamp;

	pinfo.pdi_flags = 0;
	if (po->running)
		pinfo.pdi_flags |= PDI_RUNNING;
	if (po->auxdata)
		pinfo.pdi_flags |= PDI_AUXDATA;
	if (po->origdev)
		pinfo.pdi_flags |= PDI_ORIGDEV;
	if (po->has_vnet_hdr)
		pinfo.pdi_flags |= PDI_VNETHDR;
	if (po->tp_loss)
		pinfo.pdi_flags |= PDI_LOSS;

	return nla_put(nlskb, PACKET_DIAG_INFO, sizeof(pinfo), &pinfo);
}

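/* Dump the socket's multicast/promiscuous memberships as a nested
 * PACKET_DIAG_MCLIST attribute; each packet_mclist entry becomes one
 * packet_diag_mclist record reserved directly in the skb, with the
 * list walked under rtnl_lock().
 */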
static int pdiag_put_mclist(const struct packet_sock *po, struct sk_buff *nlskb)
{
	struct nlattr *mca;
	struct packet_mclist *ml;

	mca = nla_nest_start(nlskb, PACKET_DIAG_MCLIST);
	if (!mca)
		return -EMSGSIZE;

	rtnl_lock();
	for (ml = po->mclist; ml; ml = ml->next) {
		struct packet_diag_mclist *dml;

		dml = nla_reserve_nohdr(nlskb, sizeof(*dml));
		if (!dml) {
			rtnl_unlock();
			nla_nest_cancel(nlskb, mca);
			return -EMSGSIZE;
		}

		dml->pdmc_index = ml->ifindex;
		dml->pdmc_type = ml->type;
		dml->pdmc_alen = ml->alen;
		dml->pdmc_count = ml->count;
		BUILD_BUG_ON(sizeof(dml->pdmc_addr) != sizeof(ml->addr));
		memcpy(dml->pdmc_addr, ml->addr, sizeof(ml->addr));
	}

	rtnl_unlock();
	nla_nest_end(nlskb, mca);

	return 0;
}

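/* Describe one rx/tx ring as a packet_diag_ring attribute.  Rings that
 * have not been set up and TPACKET_V3 TX rings are skipped; the
 * block-descriptor fields are only filled in for TPACKET_V3 and are
 * zeroed for older ring versions.
 */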
static int pdiag_put_ring(struct packet_ring_buffer *ring, int ver, int nl_type,
			  struct sk_buff *nlskb)
{
	struct packet_diag_ring pdr;

	if (!ring->pg_vec || ((ver > TPACKET_V2) &&
			      (nl_type == PACKET_DIAG_TX_RING)))
		return 0;

	pdr.pdr_block_size = ring->pg_vec_pages << PAGE_SHIFT;
	pdr.pdr_block_nr = ring->pg_vec_len;
	pdr.pdr_frame_size = ring->frame_size;
	pdr.pdr_frame_nr = ring->frame_max + 1;

	if (ver > TPACKET_V2) {
		pdr.pdr_retire_tmo = ring->prb_bdqc.retire_blk_tov;
		pdr.pdr_sizeof_priv = ring->prb_bdqc.blk_sizeof_priv;
		pdr.pdr_features = ring->prb_bdqc.feature_req_word;
	} else {
		pdr.pdr_retire_tmo = 0;
		pdr.pdr_sizeof_priv = 0;
		pdr.pdr_features = 0;
	}

	return nla_put(nlskb, nl_type, sizeof(pdr), &pdr);
}

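/* Report both ring configurations under pg_vec_lock so the dump cannot
 * race with a concurrent PACKET_RX_RING/PACKET_TX_RING setsockopt.
 */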
static int pdiag_put_rings_cfg(struct packet_sock *po, struct sk_buff *skb)
{
	int ret;

	mutex_lock(&po->pg_vec_lock);
	ret = pdiag_put_ring(&po->rx_ring, po->tp_version,
			     PACKET_DIAG_RX_RING, skb);
	if (!ret)
		ret = pdiag_put_ring(&po->tx_ring, po->tp_version,
				     PACKET_DIAG_TX_RING, skb);
	mutex_unlock(&po->pg_vec_lock);

	return ret;
}

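/* If the socket belongs to a fanout group, report the group id in the
 * low 16 bits and the fanout type in the high 16 bits of a
 * PACKET_DIAG_FANOUT attribute.
 */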
static int pdiag_put_fanout(struct packet_sock *po, struct sk_buff *nlskb)
{
	int ret = 0;

	mutex_lock(&fanout_mutex);
	if (po->fanout) {
		u32 val;

		val = (u32)po->fanout->id | ((u32)po->fanout->type << 16);
		ret = nla_put_u32(nlskb, PACKET_DIAG_FANOUT, val);
	}
	mutex_unlock(&fanout_mutex);

	return ret;
}

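/* Build one netlink response message for a single AF_PACKET socket: the
 * fixed packet_diag_msg header followed by whatever optional attributes
 * the request asked for via pdiag_show.  On overflow the partial message
 * is trimmed and -EMSGSIZE is returned so the dump can resume in the
 * next skb.
 */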
static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
			struct packet_diag_req *req,
			struct user_namespace *user_ns,
			u32 portid, u32 seq, u32 flags, int sk_ino)
{
	struct nlmsghdr *nlh;
	struct packet_diag_msg *rp;
	struct packet_sock *po = pkt_sk(sk);

	nlh = nlmsg_put(skb, portid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rp), flags);
	if (!nlh)
		return -EMSGSIZE;

	rp = nlmsg_data(nlh);
	rp->pdiag_family = AF_PACKET;
	rp->pdiag_type = sk->sk_type;
	rp->pdiag_num = ntohs(po->num);
	rp->pdiag_ino = sk_ino;
	sock_diag_save_cookie(sk, rp->pdiag_cookie);

	if ((req->pdiag_show & PACKET_SHOW_INFO) &&
	    pdiag_put_info(po, skb))
		goto out_nlmsg_trim;

	if ((req->pdiag_show & PACKET_SHOW_INFO) &&
	    nla_put_u32(skb, PACKET_DIAG_UID,
			from_kuid_munged(user_ns, sock_i_uid(sk))))
		goto out_nlmsg_trim;

	if ((req->pdiag_show & PACKET_SHOW_MCLIST) &&
	    pdiag_put_mclist(po, skb))
		goto out_nlmsg_trim;

	if ((req->pdiag_show & PACKET_SHOW_RING_CFG) &&
	    pdiag_put_rings_cfg(po, skb))
		goto out_nlmsg_trim;

	if ((req->pdiag_show & PACKET_SHOW_FANOUT) &&
	    pdiag_put_fanout(po, skb))
		goto out_nlmsg_trim;

	if ((req->pdiag_show & PACKET_SHOW_MEMINFO) &&
	    sock_diag_put_meminfo(sk, skb, PACKET_DIAG_MEMINFO))
		goto out_nlmsg_trim;

	return nlmsg_end(skb, nlh);

out_nlmsg_trim:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

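/* Netlink dump callback: walk the per-namespace AF_PACKET socket list and
 * emit one record per socket, using cb->args[0] to remember how far the
 * previous pass got.
 */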
static int packet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int num = 0, s_num = cb->args[0];
	struct packet_diag_req *req;
	struct net *net;
	struct sock *sk;

	net = sock_net(skb->sk);
	req = nlmsg_data(cb->nlh);

	mutex_lock(&net->packet.sklist_lock);
	sk_for_each(sk, &net->packet.sklist) {
		if (!net_eq(sock_net(sk), net))
			continue;
		if (num < s_num)
			goto next;

		if (sk_diag_fill(sk, skb, req,
				 sk_user_ns(NETLINK_CB(cb->skb).sk),
				 NETLINK_CB(cb->skb).portid,
				 cb->nlh->nlmsg_seq, NLM_F_MULTI,
				 sock_i_ino(sk)) < 0)
			goto done;
next:
		num++;
	}
done:
	mutex_unlock(&net->packet.sklist_lock);
	cb->args[0] = num;

	return skb->len;
}

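/* Entry point for SOCK_DIAG_BY_FAMILY requests with sdiag_family ==
 * AF_PACKET.  Only NLM_F_DUMP requests are supported, and sdiag_protocol
 * must be zero for now.
 */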
static int packet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
{
	int hdrlen = sizeof(struct packet_diag_req);
	struct net *net = sock_net(skb->sk);
	struct packet_diag_req *req;

	if (nlmsg_len(h) < hdrlen)
		return -EINVAL;

	req = nlmsg_data(h);
	/* Make it possible to support protocol filtering later */
	if (req->sdiag_protocol)
		return -EINVAL;

	if (h->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = packet_diag_dump,
		};
		return netlink_dump_start(net->diag_nlsk, skb, h, &c);
	} else
		return -EOPNOTSUPP;
}

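/* Register with the generic sock_diag core so AF_PACKET queries are
 * routed to the dumper above.
 */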
static const struct sock_diag_handler packet_diag_handler = {
	.family = AF_PACKET,
	.dump = packet_diag_handler_dump,
};

static int __init packet_diag_init(void)
{
	return sock_diag_register(&packet_diag_handler);
}

static void __exit packet_diag_exit(void)
{
	sock_diag_unregister(&packet_diag_handler);
}

module_init(packet_diag_init);
module_exit(packet_diag_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 17 /* AF_PACKET */);