blob: 74dddf84adcdd7fea05ca93d94f97c6558917ae1 [file] [log] [blame]
Pavel Emelyanov8ef874b2011-12-06 07:59:52 +00001#include <linux/mutex.h>
2#include <linux/socket.h>
3#include <linux/skbuff.h>
4#include <net/netlink.h>
5#include <net/net_namespace.h>
6#include <linux/module.h>
Pavel Emelyanov5d2e5f22011-12-30 00:53:13 +00007#include <net/sock.h>
Pavel Emelyanov8ef874b2011-12-06 07:59:52 +00008
9#include <linux/inet_diag.h>
10#include <linux/sock_diag.h>
11
/* Per-address-family diag handlers; reads and writes are serialized by
 * sock_diag_table_mutex.
 */
static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
/* Legacy inet_diag entry point for pre-SOCK_DIAG_BY_FAMILY requests
 * (TCPDIAG_GETSOCK/DCCPDIAG_GETSOCK); also protected by the table mutex.
 */
static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
static DEFINE_MUTEX(sock_diag_table_mutex);
15
Eric Dumazet33cf7c92015-03-11 18:53:14 -070016static u64 sock_gen_cookie(struct sock *sk)
Pavel Emelyanovf65c1b52011-12-15 02:43:44 +000017{
Eric Dumazet33cf7c92015-03-11 18:53:14 -070018 while (1) {
19 u64 res = atomic64_read(&sk->sk_cookie);
20
21 if (res)
22 return res;
23 res = atomic64_inc_return(&sock_net(sk)->cookie_gen);
24 atomic64_cmpxchg(&sk->sk_cookie, 0, res);
25 }
26}
27
28int sock_diag_check_cookie(struct sock *sk, const __u32 *cookie)
29{
30 u64 res;
31
32 if (cookie[0] == INET_DIAG_NOCOOKIE && cookie[1] == INET_DIAG_NOCOOKIE)
Pavel Emelyanovf65c1b52011-12-15 02:43:44 +000033 return 0;
Eric Dumazet33cf7c92015-03-11 18:53:14 -070034
35 res = sock_gen_cookie(sk);
36 if ((u32)res != cookie[0] || (u32)(res >> 32) != cookie[1])
37 return -ESTALE;
38
39 return 0;
Pavel Emelyanovf65c1b52011-12-15 02:43:44 +000040}
41EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
42
Eric Dumazet33cf7c92015-03-11 18:53:14 -070043void sock_diag_save_cookie(struct sock *sk, __u32 *cookie)
Pavel Emelyanovf65c1b52011-12-15 02:43:44 +000044{
Eric Dumazet33cf7c92015-03-11 18:53:14 -070045 u64 res = sock_gen_cookie(sk);
46
47 cookie[0] = (u32)res;
48 cookie[1] = (u32)(res >> 32);
Pavel Emelyanovf65c1b52011-12-15 02:43:44 +000049}
50EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
51
Pavel Emelyanov5d2e5f22011-12-30 00:53:13 +000052int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attrtype)
53{
Thomas Graf7b468662012-06-26 23:36:11 +000054 u32 mem[SK_MEMINFO_VARS];
Pavel Emelyanov5d2e5f22011-12-30 00:53:13 +000055
56 mem[SK_MEMINFO_RMEM_ALLOC] = sk_rmem_alloc_get(sk);
57 mem[SK_MEMINFO_RCVBUF] = sk->sk_rcvbuf;
58 mem[SK_MEMINFO_WMEM_ALLOC] = sk_wmem_alloc_get(sk);
59 mem[SK_MEMINFO_SNDBUF] = sk->sk_sndbuf;
60 mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc;
61 mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued;
62 mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
Eric Dumazetd594e982012-06-04 03:50:35 +000063 mem[SK_MEMINFO_BACKLOG] = sk->sk_backlog.len;
Pavel Emelyanov5d2e5f22011-12-30 00:53:13 +000064
Thomas Graf7b468662012-06-26 23:36:11 +000065 return nla_put(skb, attrtype, sizeof(mem), &mem);
Pavel Emelyanov5d2e5f22011-12-30 00:53:13 +000066}
67EXPORT_SYMBOL_GPL(sock_diag_put_meminfo);
68
Eric W. Biedermana53b72c2014-04-23 14:26:25 -070069int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk,
Nicolas Dichtele8d96122013-04-25 06:53:54 +000070 struct sk_buff *skb, int attrtype)
71{
Daniel Borkmanna3ea2692014-03-28 18:58:19 +010072 struct sock_fprog_kern *fprog;
Nicolas Dichtele8d96122013-04-25 06:53:54 +000073 struct sk_filter *filter;
Daniel Borkmanna3ea2692014-03-28 18:58:19 +010074 struct nlattr *attr;
75 unsigned int flen;
Nicolas Dichtele8d96122013-04-25 06:53:54 +000076 int err = 0;
77
Eric W. Biedermana53b72c2014-04-23 14:26:25 -070078 if (!may_report_filterinfo) {
Nicolas Dichtele8d96122013-04-25 06:53:54 +000079 nla_reserve(skb, attrtype, 0);
80 return 0;
81 }
82
83 rcu_read_lock();
Nicolas Dichtele8d96122013-04-25 06:53:54 +000084 filter = rcu_dereference(sk->sk_filter);
Daniel Borkmanna3ea2692014-03-28 18:58:19 +010085 if (!filter)
86 goto out;
Nicolas Dichtele8d96122013-04-25 06:53:54 +000087
Alexei Starovoitov7ae457c2014-07-30 20:34:16 -070088 fprog = filter->prog->orig_prog;
Alexei Starovoitov009937e2014-07-30 20:34:13 -070089 flen = bpf_classic_proglen(fprog);
Daniel Borkmanna3ea2692014-03-28 18:58:19 +010090
91 attr = nla_reserve(skb, attrtype, flen);
Nicolas Dichtele8d96122013-04-25 06:53:54 +000092 if (attr == NULL) {
93 err = -EMSGSIZE;
94 goto out;
95 }
96
Daniel Borkmanna3ea2692014-03-28 18:58:19 +010097 memcpy(nla_data(attr), fprog->filter, flen);
Nicolas Dichtele8d96122013-04-25 06:53:54 +000098out:
99 rcu_read_unlock();
100 return err;
101}
102EXPORT_SYMBOL(sock_diag_put_filterinfo);
103
Pavel Emelyanov8ef874b2011-12-06 07:59:52 +0000104void sock_diag_register_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh))
105{
106 mutex_lock(&sock_diag_table_mutex);
107 inet_rcv_compat = fn;
108 mutex_unlock(&sock_diag_table_mutex);
109}
110EXPORT_SYMBOL_GPL(sock_diag_register_inet_compat);
111
112void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh))
113{
114 mutex_lock(&sock_diag_table_mutex);
115 inet_rcv_compat = NULL;
116 mutex_unlock(&sock_diag_table_mutex);
117}
118EXPORT_SYMBOL_GPL(sock_diag_unregister_inet_compat);
119
Shan Wei8dcf01f2012-04-24 18:21:07 +0000120int sock_diag_register(const struct sock_diag_handler *hndl)
Pavel Emelyanov8ef874b2011-12-06 07:59:52 +0000121{
122 int err = 0;
123
Dan Carpenter6f8e4ad2011-12-07 20:49:38 +0000124 if (hndl->family >= AF_MAX)
Pavel Emelyanov8ef874b2011-12-06 07:59:52 +0000125 return -EINVAL;
126
127 mutex_lock(&sock_diag_table_mutex);
128 if (sock_diag_handlers[hndl->family])
129 err = -EBUSY;
130 else
131 sock_diag_handlers[hndl->family] = hndl;
132 mutex_unlock(&sock_diag_table_mutex);
133
134 return err;
135}
136EXPORT_SYMBOL_GPL(sock_diag_register);
137
Shan Wei8dcf01f2012-04-24 18:21:07 +0000138void sock_diag_unregister(const struct sock_diag_handler *hnld)
Pavel Emelyanov8ef874b2011-12-06 07:59:52 +0000139{
140 int family = hnld->family;
141
Dan Carpenter6f8e4ad2011-12-07 20:49:38 +0000142 if (family >= AF_MAX)
Pavel Emelyanov8ef874b2011-12-06 07:59:52 +0000143 return;
144
145 mutex_lock(&sock_diag_table_mutex);
146 BUG_ON(sock_diag_handlers[family] != hnld);
147 sock_diag_handlers[family] = NULL;
148 mutex_unlock(&sock_diag_table_mutex);
149}
150EXPORT_SYMBOL_GPL(sock_diag_unregister);
151
Pavel Emelyanov8ef874b2011-12-06 07:59:52 +0000152static int __sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
153{
154 int err;
Thomas Graf7b468662012-06-26 23:36:11 +0000155 struct sock_diag_req *req = nlmsg_data(nlh);
Shan Wei8dcf01f2012-04-24 18:21:07 +0000156 const struct sock_diag_handler *hndl;
Pavel Emelyanov8ef874b2011-12-06 07:59:52 +0000157
158 if (nlmsg_len(nlh) < sizeof(*req))
159 return -EINVAL;
160
Mathias Krause6e601a52013-02-23 01:13:47 +0000161 if (req->sdiag_family >= AF_MAX)
162 return -EINVAL;
163
Mathias Krause8e904552013-02-23 01:13:48 +0000164 if (sock_diag_handlers[req->sdiag_family] == NULL)
165 request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
166 NETLINK_SOCK_DIAG, req->sdiag_family);
167
168 mutex_lock(&sock_diag_table_mutex);
169 hndl = sock_diag_handlers[req->sdiag_family];
Pavel Emelyanov8ef874b2011-12-06 07:59:52 +0000170 if (hndl == NULL)
171 err = -ENOENT;
172 else
173 err = hndl->dump(skb, nlh);
Mathias Krause8e904552013-02-23 01:13:48 +0000174 mutex_unlock(&sock_diag_table_mutex);
Pavel Emelyanov8ef874b2011-12-06 07:59:52 +0000175
176 return err;
177}
178
179static int sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
180{
181 int ret;
182
183 switch (nlh->nlmsg_type) {
184 case TCPDIAG_GETSOCK:
185 case DCCPDIAG_GETSOCK:
186 if (inet_rcv_compat == NULL)
187 request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
Pavel Emelyanovaec8dc62011-12-15 02:43:27 +0000188 NETLINK_SOCK_DIAG, AF_INET);
Pavel Emelyanov8ef874b2011-12-06 07:59:52 +0000189
190 mutex_lock(&sock_diag_table_mutex);
191 if (inet_rcv_compat != NULL)
192 ret = inet_rcv_compat(skb, nlh);
193 else
194 ret = -EOPNOTSUPP;
195 mutex_unlock(&sock_diag_table_mutex);
196
197 return ret;
198 case SOCK_DIAG_BY_FAMILY:
199 return __sock_diag_rcv_msg(skb, nlh);
200 default:
201 return -EINVAL;
202 }
203}
204
205static DEFINE_MUTEX(sock_diag_mutex);
206
207static void sock_diag_rcv(struct sk_buff *skb)
208{
209 mutex_lock(&sock_diag_mutex);
210 netlink_rcv_skb(skb, &sock_diag_rcv_msg);
211 mutex_unlock(&sock_diag_mutex);
212}
213
Andrey Vagin51d7ccc2012-07-16 04:28:49 +0000214static int __net_init diag_net_init(struct net *net)
Pavel Emelyanov8ef874b2011-12-06 07:59:52 +0000215{
Pablo Neira Ayusoa31f2d12012-06-29 06:15:21 +0000216 struct netlink_kernel_cfg cfg = {
217 .input = sock_diag_rcv,
218 };
219
Pablo Neira Ayuso9f00d972012-09-08 02:53:54 +0000220 net->diag_nlsk = netlink_kernel_create(net, NETLINK_SOCK_DIAG, &cfg);
Andrey Vagin51d7ccc2012-07-16 04:28:49 +0000221 return net->diag_nlsk == NULL ? -ENOMEM : 0;
222}
223
224static void __net_exit diag_net_exit(struct net *net)
225{
226 netlink_kernel_release(net->diag_nlsk);
227 net->diag_nlsk = NULL;
228}
229
/* Hooks run for every network namespace creation/destruction. */
static struct pernet_operations diag_net_ops = {
	.init = diag_net_init,
	.exit = diag_net_exit,
};
234
235static int __init sock_diag_init(void)
236{
237 return register_pernet_subsys(&diag_net_ops);
Pavel Emelyanov8ef874b2011-12-06 07:59:52 +0000238}
239
240static void __exit sock_diag_exit(void)
241{
Andrey Vagin51d7ccc2012-07-16 04:28:49 +0000242 unregister_pernet_subsys(&diag_net_ops);
Pavel Emelyanov8ef874b2011-12-06 07:59:52 +0000243}
244
module_init(sock_diag_init);
module_exit(sock_diag_exit);
MODULE_LICENSE("GPL");
/* Allows autoloading via request_module("net-pf-16-proto-4-...") above. */
MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_SOCK_DIAG);