/* netfilter.c: look after the filters for various protocols.
 * Heavily influenced by the old firewall.c by David Bonn and Alan Cox.
 *
 * Thanks to Rob `CmdrTaco' Malda for not influencing this code in any
 * way.
 *
 * Rusty Russell (C)2000 -- This code is GPL.
 * Patrick McHardy (c) 2006-2012
 */
#include <linux/kernel.h>
#include <linux/netfilter.h>
#include <net/protocol.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/if.h>
#include <linux/netdevice.h>
#include <linux/netfilter_ipv6.h>
#include <linux/inetdevice.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/sock.h>

#include "nf_internals.h"

static DEFINE_MUTEX(afinfo_mutex);

const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO] __read_mostly;
EXPORT_SYMBOL(nf_afinfo);
const struct nf_ipv6_ops __rcu *nf_ipv6_ops __read_mostly;
EXPORT_SYMBOL_GPL(nf_ipv6_ops);

DEFINE_PER_CPU(bool, nf_skb_duplicated);
EXPORT_SYMBOL_GPL(nf_skb_duplicated);

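/* The nf_afinfo[] slot for afinfo->family holds the address-family specific
 * helper operations.  It is written under afinfo_mutex and read under RCU,
 * hence the synchronize_rcu() on unregistration below. */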
int nf_register_afinfo(const struct nf_afinfo *afinfo)
{
        mutex_lock(&afinfo_mutex);
        RCU_INIT_POINTER(nf_afinfo[afinfo->family], afinfo);
        mutex_unlock(&afinfo_mutex);
        return 0;
}
EXPORT_SYMBOL_GPL(nf_register_afinfo);

void nf_unregister_afinfo(const struct nf_afinfo *afinfo)
{
        mutex_lock(&afinfo_mutex);
        RCU_INIT_POINTER(nf_afinfo[afinfo->family], NULL);
        mutex_unlock(&afinfo_mutex);
        synchronize_rcu();
}
EXPORT_SYMBOL_GPL(nf_unregister_afinfo);

#ifdef HAVE_JUMP_LABEL
struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
EXPORT_SYMBOL(nf_hooks_needed);
#endif

static DEFINE_MUTEX(nf_hook_mutex);

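/* Pick the hook list a set of ops should live on: the per-netns table for
 * ordinary protocol families, or the device's ingress list for
 * NFPROTO_NETDEV/NF_NETDEV_INGRESS.  Returns NULL if no list applies. */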
static struct list_head *nf_find_hook_list(struct net *net,
                                           const struct nf_hook_ops *reg)
{
        struct list_head *hook_list = NULL;

        if (reg->pf != NFPROTO_NETDEV)
                hook_list = &net->nf.hooks[reg->pf][reg->hooknum];
        else if (reg->hooknum == NF_NETDEV_INGRESS) {
#ifdef CONFIG_NETFILTER_INGRESS
                if (reg->dev && dev_net(reg->dev) == net)
                        hook_list = &reg->dev->nf_hooks_ingress;
#endif
        }
        return hook_list;
}

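/* Each registration gets its own nf_hook_entry: 'ops' is the copy that is
 * linked into the hook list, 'orig_ops' remembers the caller's structure so
 * that unregistration can find the matching entry again. */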
struct nf_hook_entry {
        const struct nf_hook_ops *orig_ops;
        struct nf_hook_ops ops;
};

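/* Register one hook in the given network namespace, keeping the list sorted
 * by ascending priority.  Returns -ENOENT when no matching hook list exists
 * for this namespace/device and -ENOMEM on allocation failure. */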
int nf_register_net_hook(struct net *net, const struct nf_hook_ops *reg)
{
        struct list_head *hook_list;
        struct nf_hook_entry *entry;
        struct nf_hook_ops *elem;

        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        entry->orig_ops = reg;
        entry->ops = *reg;

        hook_list = nf_find_hook_list(net, reg);
        if (!hook_list) {
                kfree(entry);
                return -ENOENT;
        }

        mutex_lock(&nf_hook_mutex);
        list_for_each_entry(elem, hook_list, list) {
                if (reg->priority < elem->priority)
                        break;
        }
        list_add_rcu(&entry->ops.list, elem->list.prev);
        mutex_unlock(&nf_hook_mutex);
#ifdef CONFIG_NETFILTER_INGRESS
        if (reg->pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS)
                net_inc_ingress_queue();
#endif
#ifdef HAVE_JUMP_LABEL
        static_key_slow_inc(&nf_hooks_needed[reg->pf][reg->hooknum]);
#endif
        return 0;
}
EXPORT_SYMBOL(nf_register_net_hook);

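/* Remove a hook from its per-netns (or per-device) list.  Two RCU grace
 * periods are observed before the entry is freed: one after unlinking it and
 * one after dropping any packets it may still own in nf_queue. */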
void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg)
{
        struct list_head *hook_list;
        struct nf_hook_entry *entry;
        struct nf_hook_ops *elem;

        hook_list = nf_find_hook_list(net, reg);
        if (!hook_list)
                return;

        mutex_lock(&nf_hook_mutex);
        list_for_each_entry(elem, hook_list, list) {
                entry = container_of(elem, struct nf_hook_entry, ops);
                if (entry->orig_ops == reg) {
                        list_del_rcu(&entry->ops.list);
                        break;
                }
        }
        mutex_unlock(&nf_hook_mutex);
        if (&elem->list == hook_list) {
                WARN(1, "nf_unregister_net_hook: hook not found!\n");
                return;
        }
#ifdef CONFIG_NETFILTER_INGRESS
        if (reg->pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS)
                net_dec_ingress_queue();
#endif
#ifdef HAVE_JUMP_LABEL
        static_key_slow_dec(&nf_hooks_needed[reg->pf][reg->hooknum]);
#endif
        synchronize_net();
        nf_queue_nf_hook_drop(net, &entry->ops);
        /* other cpu might still process nfqueue verdict that used reg */
        synchronize_net();
        kfree(entry);
}
EXPORT_SYMBOL(nf_unregister_net_hook);

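/* Register an array of hooks in one go, unwinding the ones already added if
 * any registration fails.
 *
 * A minimal illustrative caller sketch (not taken from this file; my_hook
 * and my_ops are made-up names), assuming the hook signature used by
 * nf_iterate() below:
 *
 *      static unsigned int my_hook(const struct nf_hook_ops *ops,
 *                                  struct sk_buff *skb,
 *                                  const struct nf_hook_state *state)
 *      {
 *              return NF_ACCEPT;
 *      }
 *
 *      static struct nf_hook_ops my_ops[] = {
 *              {
 *                      .hook     = my_hook,
 *                      .pf       = NFPROTO_IPV4,
 *                      .hooknum  = NF_INET_LOCAL_IN,
 *                      .priority = NF_IP_PRI_FILTER,
 *              },
 *      };
 *
 *      err = nf_register_net_hooks(net, my_ops, ARRAY_SIZE(my_ops));
 */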
int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg,
                          unsigned int n)
{
        unsigned int i;
        int err = 0;

        for (i = 0; i < n; i++) {
                err = nf_register_net_hook(net, &reg[i]);
                if (err)
                        goto err;
        }
        return err;

err:
        if (i > 0)
                nf_unregister_net_hooks(net, reg, i);
        return err;
}
EXPORT_SYMBOL(nf_register_net_hooks);

void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg,
                             unsigned int n)
{
        while (n-- > 0)
                nf_unregister_net_hook(net, &reg[n]);
}
EXPORT_SYMBOL(nf_unregister_net_hooks);

static LIST_HEAD(nf_hook_list);

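/* Global registration: add the hook to every existing network namespace and
 * remember it on nf_hook_list so that namespaces created later pick it up in
 * netfilter_net_init().  -ENOENT from a namespace with no matching hook list
 * (e.g. an ingress device living in another netns) is not treated as fatal. */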
int nf_register_hook(struct nf_hook_ops *reg)
{
        struct net *net, *last;
        int ret;

        rtnl_lock();
        for_each_net(net) {
                ret = nf_register_net_hook(net, reg);
                if (ret && ret != -ENOENT)
                        goto rollback;
        }
        list_add_tail(&reg->list, &nf_hook_list);
        rtnl_unlock();

        return 0;
rollback:
        last = net;
        for_each_net(net) {
                if (net == last)
                        break;
                nf_unregister_net_hook(net, reg);
        }
        rtnl_unlock();
        return ret;
}
EXPORT_SYMBOL(nf_register_hook);

void nf_unregister_hook(struct nf_hook_ops *reg)
{
        struct net *net;

        rtnl_lock();
        list_del(&reg->list);
        for_each_net(net)
                nf_unregister_net_hook(net, reg);
        rtnl_unlock();
}
EXPORT_SYMBOL(nf_unregister_hook);

int nf_register_hooks(struct nf_hook_ops *reg, unsigned int n)
{
        unsigned int i;
        int err = 0;

        for (i = 0; i < n; i++) {
                err = nf_register_hook(&reg[i]);
                if (err)
                        goto err;
        }
        return err;

err:
        if (i > 0)
                nf_unregister_hooks(reg, i);
        return err;
}
EXPORT_SYMBOL(nf_register_hooks);

void nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n)
{
        while (n-- > 0)
                nf_unregister_hook(&reg[n]);
}
EXPORT_SYMBOL(nf_unregister_hooks);

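/* Walk the remaining hooks on 'head', starting after *elemp, and return the
 * first verdict other than NF_ACCEPT (NF_REPEAT re-runs the same hook).
 * *elemp is updated so that nf_hook_slow() can resume after an NF_QUEUE. */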
unsigned int nf_iterate(struct list_head *head,
                        struct sk_buff *skb,
                        struct nf_hook_state *state,
                        struct nf_hook_ops **elemp)
{
        unsigned int verdict;

        /*
         * The caller must not block between calls to this
         * function because of risk of continuing from deleted element.
         */
        list_for_each_entry_continue_rcu((*elemp), head, list) {
                if (state->thresh > (*elemp)->priority)
                        continue;

                /* Optimization: we don't need to hold module
                   reference here, since function can't sleep. --RR */
repeat:
                verdict = (*elemp)->hook(*elemp, skb, state);
                if (verdict != NF_ACCEPT) {
#ifdef CONFIG_NETFILTER_DEBUG
                        if (unlikely((verdict & NF_VERDICT_MASK)
                                                        > NF_MAX_VERDICT)) {
                                NFDEBUG("Evil return from %p(%u).\n",
                                        (*elemp)->hook, state->hook);
                                continue;
                        }
#endif
                        if (verdict != NF_REPEAT)
                                return verdict;
                        goto repeat;
                }
        }
        return NF_ACCEPT;
}


/* Returns 1 if okfn() needs to be executed by the caller,
 * -EPERM for NF_DROP, 0 otherwise. */
int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state)
{
        struct nf_hook_ops *elem;
        unsigned int verdict;
        int ret = 0;

        /* We may already have this, but read-locks nest anyway */
        rcu_read_lock();

        elem = list_entry_rcu(state->hook_list, struct nf_hook_ops, list);
next_hook:
        verdict = nf_iterate(state->hook_list, skb, state, &elem);
        if (verdict == NF_ACCEPT || verdict == NF_STOP) {
                ret = 1;
        } else if ((verdict & NF_VERDICT_MASK) == NF_DROP) {
                kfree_skb(skb);
                ret = NF_DROP_GETERR(verdict);
                if (ret == 0)
                        ret = -EPERM;
        } else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) {
                int err = nf_queue(skb, elem, state,
                                   verdict >> NF_VERDICT_QBITS);
                if (err < 0) {
                        if (err == -ECANCELED)
                                goto next_hook;
                        if (err == -ESRCH &&
                            (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
                                goto next_hook;
                        kfree_skb(skb);
                }
        }
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL(nf_hook_slow);


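/* Ensure the first writable_len bytes of the packet are safe to modify,
 * pulling them into the (possibly copied) linear header if necessary.
 * Returns 1 on success, 0 if the packet is too short or the pull fails. */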
int skb_make_writable(struct sk_buff *skb, unsigned int writable_len)
{
        if (writable_len > skb->len)
                return 0;

        /* Not exclusive use of packet? Must copy. */
        if (!skb_cloned(skb)) {
                if (writable_len <= skb_headlen(skb))
                        return 1;
        } else if (skb_clone_writable(skb, writable_len))
                return 1;

        if (writable_len <= skb_headlen(skb))
                writable_len = 0;
        else
                writable_len -= skb_headlen(skb);

        return !!__pskb_pull_tail(skb, writable_len);
}
EXPORT_SYMBOL(skb_make_writable);

#if IS_ENABLED(CONFIG_NF_CONNTRACK)
/* This does not belong here, but locally generated errors need it if connection
   tracking is in use: without this, the connection may not be in the hash table,
   and hence manufactured ICMP or RST packets will not be associated with it. */
void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *)
                __rcu __read_mostly;
EXPORT_SYMBOL(ip_ct_attach);

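/* Attach the conntrack entry of 'skb' to the locally generated packet 'new'
 * via the ip_ct_attach pointer installed by the conntrack module. */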
void nf_ct_attach(struct sk_buff *new, const struct sk_buff *skb)
{
        void (*attach)(struct sk_buff *, const struct sk_buff *);

        if (skb->nfct) {
                rcu_read_lock();
                attach = rcu_dereference(ip_ct_attach);
                if (attach)
                        attach(new, skb);
                rcu_read_unlock();
        }
}
EXPORT_SYMBOL(nf_ct_attach);

void (*nf_ct_destroy)(struct nf_conntrack *) __rcu __read_mostly;
EXPORT_SYMBOL(nf_ct_destroy);

void nf_conntrack_destroy(struct nf_conntrack *nfct)
{
        void (*destroy)(struct nf_conntrack *);

        rcu_read_lock();
        destroy = rcu_dereference(nf_ct_destroy);
        BUG_ON(destroy == NULL);
        destroy(nfct);
        rcu_read_unlock();
}
EXPORT_SYMBOL(nf_conntrack_destroy);

struct nfq_ct_hook __rcu *nfq_ct_hook __read_mostly;
EXPORT_SYMBOL_GPL(nfq_ct_hook);

/* Built-in default zone used e.g. by modules. */
const struct nf_conntrack_zone nf_ct_zone_dflt = {
        .id     = NF_CT_DEFAULT_ZONE_ID,
        .dir    = NF_CT_DEFAULT_ZONE_DIR,
};
EXPORT_SYMBOL_GPL(nf_ct_zone_dflt);
#endif /* CONFIG_NF_CONNTRACK */

#ifdef CONFIG_NF_NAT_NEEDED
void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *);
EXPORT_SYMBOL(nf_nat_decode_session_hook);
#endif

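/* Replay all globally registered hooks (nf_hook_list) into a newly created
 * network namespace, undoing the ones already added if a registration fails
 * with anything other than -ENOENT. */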
static int nf_register_hook_list(struct net *net)
{
        struct nf_hook_ops *elem;
        int ret;

        rtnl_lock();
        list_for_each_entry(elem, &nf_hook_list, list) {
                ret = nf_register_net_hook(net, elem);
                if (ret && ret != -ENOENT)
                        goto out_undo;
        }
        rtnl_unlock();
        return 0;

out_undo:
        list_for_each_entry_continue_reverse(elem, &nf_hook_list, list)
                nf_unregister_net_hook(net, elem);
        rtnl_unlock();
        return ret;
}

static void nf_unregister_hook_list(struct net *net)
{
        struct nf_hook_ops *elem;

        rtnl_lock();
        list_for_each_entry(elem, &nf_hook_list, list)
                nf_unregister_net_hook(net, elem);
        rtnl_unlock();
}

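/* Per-netns setup: initialise the hook list heads, create the
 * /proc/net/netfilter directory and pull in the globally registered hooks. */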
static int __net_init netfilter_net_init(struct net *net)
{
        int i, h, ret;

        for (i = 0; i < ARRAY_SIZE(net->nf.hooks); i++) {
                for (h = 0; h < NF_MAX_HOOKS; h++)
                        INIT_LIST_HEAD(&net->nf.hooks[i][h]);
        }

#ifdef CONFIG_PROC_FS
        net->nf.proc_netfilter = proc_net_mkdir(net, "netfilter",
                                                net->proc_net);
        if (!net->nf.proc_netfilter) {
                if (!net_eq(net, &init_net))
                        pr_err("cannot create netfilter proc entry");

                return -ENOMEM;
        }
#endif
        ret = nf_register_hook_list(net);
        if (ret)
                remove_proc_entry("netfilter", net->proc_net);

        return ret;
}

static void __net_exit netfilter_net_exit(struct net *net)
{
        nf_unregister_hook_list(net);
        remove_proc_entry("netfilter", net->proc_net);
}

static struct pernet_operations netfilter_net_ops = {
        .init = netfilter_net_init,
        .exit = netfilter_net_exit,
};

int __init netfilter_init(void)
{
        int ret;

        ret = register_pernet_subsys(&netfilter_net_ops);
        if (ret < 0)
                goto err;

        ret = netfilter_log_init();
        if (ret < 0)
                goto err_pernet;

        return 0;
err_pernet:
        unregister_pernet_subsys(&netfilter_net_ops);
err:
        return ret;
}