/* netfilter.c: look after the filters for various protocols.
 * Heavily influenced by the old firewall.c by David Bonn and Alan Cox.
 *
 * Thanks to Rob `CmdrTaco' Malda for not influencing this code in any
 * way.
 *
 * Rusty Russell (C)2000 -- This code is GPL.
 * Patrick McHardy (c) 2006-2012
 */
#include <linux/kernel.h>
#include <linux/netfilter.h>
#include <net/protocol.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/if.h>
#include <linux/netdevice.h>
#include <linux/netfilter_ipv6.h>
#include <linux/inetdevice.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/sock.h>

#include "nf_internals.h"

static DEFINE_MUTEX(afinfo_mutex);

const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO] __read_mostly;
EXPORT_SYMBOL(nf_afinfo);
const struct nf_ipv6_ops __rcu *nf_ipv6_ops __read_mostly;
EXPORT_SYMBOL_GPL(nf_ipv6_ops);

DEFINE_PER_CPU(bool, nf_skb_duplicated);
EXPORT_SYMBOL_GPL(nf_skb_duplicated);

int nf_register_afinfo(const struct nf_afinfo *afinfo)
{
	mutex_lock(&afinfo_mutex);
	RCU_INIT_POINTER(nf_afinfo[afinfo->family], afinfo);
	mutex_unlock(&afinfo_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(nf_register_afinfo);

void nf_unregister_afinfo(const struct nf_afinfo *afinfo)
{
	mutex_lock(&afinfo_mutex);
	RCU_INIT_POINTER(nf_afinfo[afinfo->family], NULL);
	mutex_unlock(&afinfo_mutex);
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(nf_unregister_afinfo);

#ifdef HAVE_JUMP_LABEL
struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
EXPORT_SYMBOL(nf_hooks_needed);
#endif

static DEFINE_MUTEX(nf_hook_mutex);

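/* Pick the list a hook belongs on: the per-netns table indexed by protocol
 * family and hook number for ordinary hooks, or the per-device ingress list
 * for NFPROTO_NETDEV/NF_NETDEV_INGRESS.  Returns NULL if no suitable list
 * exists (e.g. ingress support not compiled in, or the device belongs to a
 * different namespace).
 */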
static struct list_head *nf_find_hook_list(struct net *net,
					   const struct nf_hook_ops *reg)
{
	struct list_head *hook_list = NULL;

	if (reg->pf != NFPROTO_NETDEV)
		hook_list = &net->nf.hooks[reg->pf][reg->hooknum];
	else if (reg->hooknum == NF_NETDEV_INGRESS) {
#ifdef CONFIG_NETFILTER_INGRESS
		if (reg->dev && dev_net(reg->dev) == net)
			hook_list = &reg->dev->nf_hooks_ingress;
#endif
	}
	return hook_list;
}

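/* Each registration links a private copy of the ops into the hook list;
 * orig_ops remembers the caller's pointer so nf_unregister_net_hook() can
 * find the matching entry again.
 */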
struct nf_hook_entry {
	const struct nf_hook_ops *orig_ops;
	struct nf_hook_ops ops;
};

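/* Register one hook in one network namespace.  The entry is inserted in
 * ascending priority order under nf_hook_mutex and published with
 * list_add_rcu(), so the packet path can walk the list without taking the
 * mutex.
 */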
int nf_register_net_hook(struct net *net, const struct nf_hook_ops *reg)
{
	struct list_head *hook_list;
	struct nf_hook_entry *entry;
	struct nf_hook_ops *elem;

	if (reg->pf == NFPROTO_NETDEV &&
	    (reg->hooknum != NF_NETDEV_INGRESS ||
	     !reg->dev || dev_net(reg->dev) != net))
		return -EINVAL;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->orig_ops = reg;
	entry->ops = *reg;

	hook_list = nf_find_hook_list(net, reg);
	if (!hook_list) {
		kfree(entry);
		return -ENOENT;
	}

	mutex_lock(&nf_hook_mutex);
	list_for_each_entry(elem, hook_list, list) {
		if (reg->priority < elem->priority)
			break;
	}
	list_add_rcu(&entry->ops.list, elem->list.prev);
	mutex_unlock(&nf_hook_mutex);
#ifdef CONFIG_NETFILTER_INGRESS
	if (reg->pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS)
		net_inc_ingress_queue();
#endif
#ifdef HAVE_JUMP_LABEL
	static_key_slow_inc(&nf_hooks_needed[reg->pf][reg->hooknum]);
#endif
	return 0;
}
EXPORT_SYMBOL(nf_register_net_hook);

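/* Remove a hook from one network namespace.  After the RCU unlink, a first
 * grace period guarantees no new packets see the entry, queued packets that
 * still reference it are dropped, and a second grace period covers nfqueue
 * verdicts that may still be in flight on another CPU.
 */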
void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg)
{
	struct list_head *hook_list;
	struct nf_hook_entry *entry;
	struct nf_hook_ops *elem;

	hook_list = nf_find_hook_list(net, reg);
	if (!hook_list)
		return;

	mutex_lock(&nf_hook_mutex);
	list_for_each_entry(elem, hook_list, list) {
		entry = container_of(elem, struct nf_hook_entry, ops);
		if (entry->orig_ops == reg) {
			list_del_rcu(&entry->ops.list);
			break;
		}
	}
	mutex_unlock(&nf_hook_mutex);
	if (&elem->list == hook_list) {
		WARN(1, "nf_unregister_net_hook: hook not found!\n");
		return;
	}
#ifdef CONFIG_NETFILTER_INGRESS
	if (reg->pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS)
		net_dec_ingress_queue();
#endif
#ifdef HAVE_JUMP_LABEL
	static_key_slow_dec(&nf_hooks_needed[reg->pf][reg->hooknum]);
#endif
	synchronize_net();
	nf_queue_nf_hook_drop(net, &entry->ops);
	/* other cpu might still process nfqueue verdict that used reg */
	synchronize_net();
	kfree(entry);
}
EXPORT_SYMBOL(nf_unregister_net_hook);

int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg,
			  unsigned int n)
{
	unsigned int i;
	int err = 0;

	for (i = 0; i < n; i++) {
		err = nf_register_net_hook(net, &reg[i]);
		if (err)
			goto err;
	}
	return err;

err:
	if (i > 0)
		nf_unregister_net_hooks(net, reg, i);
	return err;
}
EXPORT_SYMBOL(nf_register_net_hooks);

void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg,
			     unsigned int n)
{
	while (n-- > 0)
		nf_unregister_net_hook(net, &reg[n]);
}
EXPORT_SYMBOL(nf_unregister_net_hooks);

static LIST_HEAD(nf_hook_list);

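/* Legacy API: register a hook in every network namespace.  The ops is also
 * kept on nf_hook_list so that namespaces created later pick it up from
 * netfilter_net_init(); -ENOENT (no matching list in that namespace) is not
 * treated as an error.
 */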
int nf_register_hook(struct nf_hook_ops *reg)
{
	struct net *net, *last;
	int ret;

	rtnl_lock();
	for_each_net(net) {
		ret = nf_register_net_hook(net, reg);
		if (ret && ret != -ENOENT)
			goto rollback;
	}
	list_add_tail(&reg->list, &nf_hook_list);
	rtnl_unlock();

	return 0;
rollback:
	last = net;
	for_each_net(net) {
		if (net == last)
			break;
		nf_unregister_net_hook(net, reg);
	}
	rtnl_unlock();
	return ret;
}
EXPORT_SYMBOL(nf_register_hook);

void nf_unregister_hook(struct nf_hook_ops *reg)
{
	struct net *net;

	rtnl_lock();
	list_del(&reg->list);
	for_each_net(net)
		nf_unregister_net_hook(net, reg);
	rtnl_unlock();
}
EXPORT_SYMBOL(nf_unregister_hook);

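/* Register an array of hooks, unwinding the ones already registered if any
 * of them fails.  A typical caller would do something like the sketch below
 * (my_hook_fn is a hypothetical nf_hookfn callback, not defined here):
 *
 *	static struct nf_hook_ops my_ops[] = {
 *		{
 *			.hook     = my_hook_fn,
 *			.pf       = NFPROTO_IPV4,
 *			.hooknum  = NF_INET_PRE_ROUTING,
 *			.priority = NF_IP_PRI_FILTER,
 *		},
 *	};
 *
 *	err = nf_register_hooks(my_ops, ARRAY_SIZE(my_ops));
 */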
int nf_register_hooks(struct nf_hook_ops *reg, unsigned int n)
{
	unsigned int i;
	int err = 0;

	for (i = 0; i < n; i++) {
		err = nf_register_hook(&reg[i]);
		if (err)
			goto err;
	}
	return err;

err:
	if (i > 0)
		nf_unregister_hooks(reg, i);
	return err;
}
EXPORT_SYMBOL(nf_register_hooks);

void nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n)
{
	while (n-- > 0)
		nf_unregister_hook(&reg[n]);
}
EXPORT_SYMBOL(nf_unregister_hooks);

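/* Walk the remaining hooks on the list under RCU, skipping entries whose
 * priority is below state->thresh, and return the first verdict other than
 * NF_ACCEPT.  NF_REPEAT reruns the same hook; *elemp is left pointing at the
 * hook that produced the verdict so the caller can resume after it.
 */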
unsigned int nf_iterate(struct list_head *head,
			struct sk_buff *skb,
			struct nf_hook_state *state,
			struct nf_hook_ops **elemp)
{
	unsigned int verdict;

	/*
	 * The caller must not block between calls to this
	 * function because of risk of continuing from deleted element.
	 */
	list_for_each_entry_continue_rcu((*elemp), head, list) {
		if (state->thresh > (*elemp)->priority)
			continue;

		/* Optimization: we don't need to hold module
		   reference here, since function can't sleep. --RR */
repeat:
		verdict = (*elemp)->hook((*elemp)->priv, skb, state);
		if (verdict != NF_ACCEPT) {
#ifdef CONFIG_NETFILTER_DEBUG
			if (unlikely((verdict & NF_VERDICT_MASK)
							> NF_MAX_VERDICT)) {
				NFDEBUG("Evil return from %p(%u).\n",
					(*elemp)->hook, state->hook);
				continue;
			}
#endif
			if (verdict != NF_REPEAT)
				return verdict;
			goto repeat;
		}
	}
	return NF_ACCEPT;
}


/* Returns 1 if okfn() needs to be executed by the caller,
 * -EPERM for NF_DROP, 0 otherwise.  Caller must hold rcu_read_lock. */
int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state)
{
	struct nf_hook_ops *elem;
	unsigned int verdict;
	int ret = 0;

	elem = list_entry_rcu(state->hook_list, struct nf_hook_ops, list);
next_hook:
	verdict = nf_iterate(state->hook_list, skb, state, &elem);
	if (verdict == NF_ACCEPT || verdict == NF_STOP) {
		ret = 1;
	} else if ((verdict & NF_VERDICT_MASK) == NF_DROP) {
		kfree_skb(skb);
		ret = NF_DROP_GETERR(verdict);
		if (ret == 0)
			ret = -EPERM;
	} else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) {
		int err = nf_queue(skb, elem, state,
				   verdict >> NF_VERDICT_QBITS);
		if (err < 0) {
			if (err == -ESRCH &&
			   (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
				goto next_hook;
			kfree_skb(skb);
		}
	}
	return ret;
}
EXPORT_SYMBOL(nf_hook_slow);


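/* Ensure the first writable_len bytes of the skb are in the linear head and
 * not shared with a clone, pulling data in from the fragments if necessary.
 * Returns 1 on success, 0 on failure.  A caller that rewrites headers would
 * typically check it first, e.g. (sketch only):
 *
 *	if (!skb_make_writable(skb, sizeof(struct iphdr)))
 *		return NF_DROP;
 */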
int skb_make_writable(struct sk_buff *skb, unsigned int writable_len)
{
	if (writable_len > skb->len)
		return 0;

	/* Not exclusive use of packet?  Must copy. */
	if (!skb_cloned(skb)) {
		if (writable_len <= skb_headlen(skb))
			return 1;
	} else if (skb_clone_writable(skb, writable_len))
		return 1;

	if (writable_len <= skb_headlen(skb))
		writable_len = 0;
	else
		writable_len -= skb_headlen(skb);

	return !!__pskb_pull_tail(skb, writable_len);
}
EXPORT_SYMBOL(skb_make_writable);

/* This needs to be compiled in any case to avoid dependencies between the
 * nfnetlink_queue code and nf_conntrack.
 */
struct nfnl_ct_hook __rcu *nfnl_ct_hook __read_mostly;
EXPORT_SYMBOL_GPL(nfnl_ct_hook);

#if IS_ENABLED(CONFIG_NF_CONNTRACK)
/* This does not belong here, but locally generated errors need it if
   connection tracking is in use: without this, the connection may not be in
   the hash table, and hence manufactured ICMP or RST packets will not be
   associated with it. */
void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *)
		__rcu __read_mostly;
EXPORT_SYMBOL(ip_ct_attach);

void nf_ct_attach(struct sk_buff *new, const struct sk_buff *skb)
{
	void (*attach)(struct sk_buff *, const struct sk_buff *);

	if (skb->nfct) {
		rcu_read_lock();
		attach = rcu_dereference(ip_ct_attach);
		if (attach)
			attach(new, skb);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(nf_ct_attach);

void (*nf_ct_destroy)(struct nf_conntrack *) __rcu __read_mostly;
EXPORT_SYMBOL(nf_ct_destroy);

void nf_conntrack_destroy(struct nf_conntrack *nfct)
{
	void (*destroy)(struct nf_conntrack *);

	rcu_read_lock();
	destroy = rcu_dereference(nf_ct_destroy);
	BUG_ON(destroy == NULL);
	destroy(nfct);
	rcu_read_unlock();
}
EXPORT_SYMBOL(nf_conntrack_destroy);

/* Built-in default zone used e.g. by modules. */
const struct nf_conntrack_zone nf_ct_zone_dflt = {
	.id	= NF_CT_DEFAULT_ZONE_ID,
	.dir	= NF_CT_DEFAULT_ZONE_DIR,
};
EXPORT_SYMBOL_GPL(nf_ct_zone_dflt);
#endif /* CONFIG_NF_CONNTRACK */

#ifdef CONFIG_NF_NAT_NEEDED
void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *);
EXPORT_SYMBOL(nf_nat_decode_session_hook);
#endif

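/* Apply the global nf_hook_list to a network namespace, unwinding on
 * failure.  Together with nf_unregister_hook_list() below, this backs the
 * pernet init/exit callbacks so legacy hooks follow namespace creation and
 * teardown.
 */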
static int nf_register_hook_list(struct net *net)
{
	struct nf_hook_ops *elem;
	int ret;

	rtnl_lock();
	list_for_each_entry(elem, &nf_hook_list, list) {
		ret = nf_register_net_hook(net, elem);
		if (ret && ret != -ENOENT)
			goto out_undo;
	}
	rtnl_unlock();
	return 0;

out_undo:
	list_for_each_entry_continue_reverse(elem, &nf_hook_list, list)
		nf_unregister_net_hook(net, elem);
	rtnl_unlock();
	return ret;
}

static void nf_unregister_hook_list(struct net *net)
{
	struct nf_hook_ops *elem;

	rtnl_lock();
	list_for_each_entry(elem, &nf_hook_list, list)
		nf_unregister_net_hook(net, elem);
	rtnl_unlock();
}

static int __net_init netfilter_net_init(struct net *net)
{
	int i, h, ret;

	for (i = 0; i < ARRAY_SIZE(net->nf.hooks); i++) {
		for (h = 0; h < NF_MAX_HOOKS; h++)
			INIT_LIST_HEAD(&net->nf.hooks[i][h]);
	}

#ifdef CONFIG_PROC_FS
	net->nf.proc_netfilter = proc_net_mkdir(net, "netfilter",
						net->proc_net);
	if (!net->nf.proc_netfilter) {
		if (!net_eq(net, &init_net))
			pr_err("cannot create netfilter proc entry");

		return -ENOMEM;
	}
#endif
	ret = nf_register_hook_list(net);
	if (ret)
		remove_proc_entry("netfilter", net->proc_net);

	return ret;
}

static void __net_exit netfilter_net_exit(struct net *net)
{
	nf_unregister_hook_list(net);
	remove_proc_entry("netfilter", net->proc_net);
}

static struct pernet_operations netfilter_net_ops = {
	.init = netfilter_net_init,
	.exit = netfilter_net_exit,
};

int __init netfilter_init(void)
{
	int ret;

	ret = register_pernet_subsys(&netfilter_net_ops);
	if (ret < 0)
		goto err;

	ret = netfilter_log_init();
	if (ret < 0)
		goto err_pernet;

	return 0;
err_pernet:
	unregister_pernet_subsys(&netfilter_net_ops);
err:
	return ret;
}