/* License: GPL */

#include <linux/mutex.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/net_namespace.h>
#include <linux/module.h>
#include <net/sock.h>
#include <linux/kernel.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/nospec.h>

#include <linux/inet_diag.h>
#include <linux/sock_diag.h>

static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
static DEFINE_MUTEX(sock_diag_table_mutex);
static struct workqueue_struct *broadcast_wq;

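/*
 * Return the 64-bit cookie identifying @sk, allocating one from the
 * per-netns counter on first use.  The cmpxchg keeps the cookie stable
 * if several callers race to assign it.
 */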
u64 sock_gen_cookie(struct sock *sk)
{
        while (1) {
                u64 res = atomic64_read(&sk->sk_cookie);

                if (res)
                        return res;
                res = atomic64_inc_return(&sock_net(sk)->cookie_gen);
                atomic64_cmpxchg(&sk->sk_cookie, 0, res);
        }
}

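/*
 * Compare a user-supplied two-word cookie against the socket's own
 * cookie.  INET_DIAG_NOCOOKIE in both words means "don't check";
 * a mismatch returns -ESTALE so the caller can reject the request.
 */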
int sock_diag_check_cookie(struct sock *sk, const __u32 *cookie)
{
        u64 res;

        if (cookie[0] == INET_DIAG_NOCOOKIE && cookie[1] == INET_DIAG_NOCOOKIE)
                return 0;

        res = sock_gen_cookie(sk);
        if ((u32)res != cookie[0] || (u32)(res >> 32) != cookie[1])
                return -ESTALE;

        return 0;
}
EXPORT_SYMBOL_GPL(sock_diag_check_cookie);

void sock_diag_save_cookie(struct sock *sk, __u32 *cookie)
{
        u64 res = sock_gen_cookie(sk);

        cookie[0] = (u32)res;
        cookie[1] = (u32)(res >> 32);
}
EXPORT_SYMBOL_GPL(sock_diag_save_cookie);

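/* Dump the socket's memory counters as a single netlink attribute. */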
int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attrtype)
{
        u32 mem[SK_MEMINFO_VARS];

        sk_get_meminfo(sk, mem);

        return nla_put(skb, attrtype, sizeof(mem), &mem);
}
EXPORT_SYMBOL_GPL(sock_diag_put_meminfo);

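/*
 * Attach the socket's classic BPF filter program (if any) as a netlink
 * attribute.  Callers that are not allowed to see the filter still get
 * a zero-length attribute so the layout of the reply is unchanged.
 */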
int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk,
                             struct sk_buff *skb, int attrtype)
{
        struct sock_fprog_kern *fprog;
        struct sk_filter *filter;
        struct nlattr *attr;
        unsigned int flen;
        int err = 0;

        if (!may_report_filterinfo) {
                nla_reserve(skb, attrtype, 0);
                return 0;
        }

        rcu_read_lock();
        filter = rcu_dereference(sk->sk_filter);
        if (!filter)
                goto out;

        fprog = filter->prog->orig_prog;
        if (!fprog)
                goto out;

        flen = bpf_classic_proglen(fprog);

        attr = nla_reserve(skb, attrtype, flen);
        if (attr == NULL) {
                err = -EMSGSIZE;
                goto out;
        }

        memcpy(nla_data(attr), fprog->filter, flen);
out:
        rcu_read_unlock();
        return err;
}
EXPORT_SYMBOL(sock_diag_put_filterinfo);

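/*
 * Destruction broadcasts: when a socket dies, a final diag record is
 * multicast to the matching SKNLGRP_*_DESTROY group.  The record is
 * built from a workqueue, and the message is sized for the largest
 * expected inet report (inet_diag_msg + protocol + tcp_info).
 */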
struct broadcast_sk {
        struct sock *sk;
        struct work_struct work;
};

static size_t sock_diag_nlmsg_size(void)
{
        return NLMSG_ALIGN(sizeof(struct inet_diag_msg)
               + nla_total_size(sizeof(u8)) /* INET_DIAG_PROTOCOL */
               + nla_total_size_64bit(sizeof(struct tcp_info))); /* INET_DIAG_INFO */
}

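/* Worker: build the final diag record for bsk->sk and multicast it. */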
static void sock_diag_broadcast_destroy_work(struct work_struct *work)
{
        struct broadcast_sk *bsk =
                container_of(work, struct broadcast_sk, work);
        struct sock *sk = bsk->sk;
        const struct sock_diag_handler *hndl;
        struct sk_buff *skb;
        const enum sknetlink_groups group = sock_diag_destroy_group(sk);
        int err = -1;

        WARN_ON(group == SKNLGRP_NONE);

        skb = nlmsg_new(sock_diag_nlmsg_size(), GFP_KERNEL);
        if (!skb)
                goto out;

        mutex_lock(&sock_diag_table_mutex);
        hndl = sock_diag_handlers[sk->sk_family];
        if (hndl && hndl->get_info)
                err = hndl->get_info(skb, sk);
        mutex_unlock(&sock_diag_table_mutex);

        if (!err)
                nlmsg_multicast(sock_net(sk)->diag_nlsk, skb, 0, group,
                                GFP_KERNEL);
        else
                kfree_skb(skb);
out:
        sk_destruct(sk);
        kfree(bsk);
}

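/*
 * Defer the destroy report to the workqueue; if the small allocation
 * fails, fall back to destructing the socket immediately and skip the
 * notification.
 */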
void sock_diag_broadcast_destroy(struct sock *sk)
{
        /* Note, this function is often called from an interrupt context. */
        struct broadcast_sk *bsk =
                kmalloc(sizeof(struct broadcast_sk), GFP_ATOMIC);
        if (!bsk)
                return sk_destruct(sk);
        bsk->sk = sk;
        INIT_WORK(&bsk->work, sock_diag_broadcast_destroy_work);
        queue_work(broadcast_wq, &bsk->work);
}

void sock_diag_register_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh))
{
        mutex_lock(&sock_diag_table_mutex);
        inet_rcv_compat = fn;
        mutex_unlock(&sock_diag_table_mutex);
}
EXPORT_SYMBOL_GPL(sock_diag_register_inet_compat);

void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh))
{
        mutex_lock(&sock_diag_table_mutex);
        inet_rcv_compat = NULL;
        mutex_unlock(&sock_diag_table_mutex);
}
EXPORT_SYMBOL_GPL(sock_diag_unregister_inet_compat);

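/*
 * Register the diag handler for one address family.  Each family may
 * have at most one handler, so a second registration fails with -EBUSY.
 */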
int sock_diag_register(const struct sock_diag_handler *hndl)
{
        int err = 0;

        if (hndl->family >= AF_MAX)
                return -EINVAL;

        mutex_lock(&sock_diag_table_mutex);
        if (sock_diag_handlers[hndl->family])
                err = -EBUSY;
        else
                sock_diag_handlers[hndl->family] = hndl;
        mutex_unlock(&sock_diag_table_mutex);

        return err;
}
EXPORT_SYMBOL_GPL(sock_diag_register);

void sock_diag_unregister(const struct sock_diag_handler *hnld)
{
        int family = hnld->family;

        if (family >= AF_MAX)
                return;

        mutex_lock(&sock_diag_table_mutex);
        BUG_ON(sock_diag_handlers[family] != hnld);
        sock_diag_handlers[family] = NULL;
        mutex_unlock(&sock_diag_table_mutex);
}
EXPORT_SYMBOL_GPL(sock_diag_unregister);

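/*
 * Dispatch a SOCK_DIAG_BY_FAMILY or SOCK_DESTROY request to the
 * per-family handler, loading the matching diag module on demand.
 * array_index_nospec() clamps the user-controlled family index before
 * it is used under speculation.
 */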
static int __sock_diag_cmd(struct sk_buff *skb, struct nlmsghdr *nlh)
{
        int err;
        struct sock_diag_req *req = nlmsg_data(nlh);
        const struct sock_diag_handler *hndl;

        if (nlmsg_len(nlh) < sizeof(*req))
                return -EINVAL;

        if (req->sdiag_family >= AF_MAX)
                return -EINVAL;
        req->sdiag_family = array_index_nospec(req->sdiag_family, AF_MAX);

        if (sock_diag_handlers[req->sdiag_family] == NULL)
                sock_load_diag_module(req->sdiag_family, 0);

        mutex_lock(&sock_diag_table_mutex);
        hndl = sock_diag_handlers[req->sdiag_family];
        if (hndl == NULL)
                err = -ENOENT;
        else if (nlh->nlmsg_type == SOCK_DIAG_BY_FAMILY)
                err = hndl->dump(skb, nlh);
        else if (nlh->nlmsg_type == SOCK_DESTROY && hndl->destroy)
                err = hndl->destroy(skb, nlh);
        else
                err = -EOPNOTSUPP;
        mutex_unlock(&sock_diag_table_mutex);

        return err;
}

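/*
 * Netlink input path: legacy TCPDIAG/DCCPDIAG requests go through the
 * inet compat hook, everything else through __sock_diag_cmd().
 */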
static int sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
                             struct netlink_ext_ack *extack)
{
        int ret;

        switch (nlh->nlmsg_type) {
        case TCPDIAG_GETSOCK:
        case DCCPDIAG_GETSOCK:
                if (inet_rcv_compat == NULL)
                        sock_load_diag_module(AF_INET, 0);

                mutex_lock(&sock_diag_table_mutex);
                if (inet_rcv_compat != NULL)
                        ret = inet_rcv_compat(skb, nlh);
                else
                        ret = -EOPNOTSUPP;
                mutex_unlock(&sock_diag_table_mutex);

                return ret;
        case SOCK_DIAG_BY_FAMILY:
        case SOCK_DESTROY:
                return __sock_diag_cmd(skb, nlh);
        default:
                return -EINVAL;
        }
}

static DEFINE_MUTEX(sock_diag_mutex);

static void sock_diag_rcv(struct sk_buff *skb)
{
        mutex_lock(&sock_diag_mutex);
        netlink_rcv_skb(skb, &sock_diag_rcv_msg);
        mutex_unlock(&sock_diag_mutex);
}

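/*
 * A listener joining one of the destroy multicast groups is a hint that
 * the corresponding inet diag module will be needed, so load it now.
 */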
static int sock_diag_bind(struct net *net, int group)
{
        switch (group) {
        case SKNLGRP_INET_TCP_DESTROY:
        case SKNLGRP_INET_UDP_DESTROY:
                if (!sock_diag_handlers[AF_INET])
                        sock_load_diag_module(AF_INET, 0);
                break;
        case SKNLGRP_INET6_TCP_DESTROY:
        case SKNLGRP_INET6_UDP_DESTROY:
                if (!sock_diag_handlers[AF_INET6])
                        sock_load_diag_module(AF_INET6, 0);
                break;
        }
        return 0;
}

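/*
 * Administratively close @sk on behalf of a SOCK_DESTROY request.
 * Requires CAP_NET_ADMIN in the socket's namespace and a protocol that
 * implements ->diag_destroy().
 */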
int sock_diag_destroy(struct sock *sk, int err)
{
        if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
                return -EPERM;

        if (!sk->sk_prot->diag_destroy)
                return -EOPNOTSUPP;

        return sk->sk_prot->diag_destroy(sk, err);
}
EXPORT_SYMBOL_GPL(sock_diag_destroy);

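/* Each network namespace gets its own NETLINK_SOCK_DIAG kernel socket. */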
static int __net_init diag_net_init(struct net *net)
{
        struct netlink_kernel_cfg cfg = {
                .groups = SKNLGRP_MAX,
                .input  = sock_diag_rcv,
                .bind   = sock_diag_bind,
                .flags  = NL_CFG_F_NONROOT_RECV,
        };

        net->diag_nlsk = netlink_kernel_create(net, NETLINK_SOCK_DIAG, &cfg);
        return net->diag_nlsk == NULL ? -ENOMEM : 0;
}

static void __net_exit diag_net_exit(struct net *net)
{
        netlink_kernel_release(net->diag_nlsk);
        net->diag_nlsk = NULL;
}

static struct pernet_operations diag_net_ops = {
        .init = diag_net_init,
        .exit = diag_net_exit,
};

static int __init sock_diag_init(void)
{
        broadcast_wq = alloc_workqueue("sock_diag_events", 0, 0);
        BUG_ON(!broadcast_wq);
        return register_pernet_subsys(&diag_net_ops);
}
device_initcall(sock_diag_init);