/* netfilter.c: look after the filters for various protocols.
 * Heavily influenced by the old firewall.c by David Bonn and Alan Cox.
 *
 * Thanks to Rob `CmdrTaco' Malda for not influencing this code in any
 * way.
 *
 * Rusty Russell (C)2000 -- This code is GPL.
 * Patrick McHardy (c) 2006-2012
 */
#include <linux/kernel.h>
#include <linux/netfilter.h>
#include <net/protocol.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/if.h>
#include <linux/netdevice.h>
#include <linux/netfilter_ipv6.h>
#include <linux/inetdevice.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>
#include <net/sock.h>

#include "nf_internals.h"

static DEFINE_MUTEX(afinfo_mutex);

const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO] __read_mostly;
EXPORT_SYMBOL(nf_afinfo);
const struct nf_ipv6_ops __rcu *nf_ipv6_ops __read_mostly;
EXPORT_SYMBOL_GPL(nf_ipv6_ops);

DEFINE_PER_CPU(bool, nf_skb_duplicated);
EXPORT_SYMBOL_GPL(nf_skb_duplicated);

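/* Publish the address-family specific helper ops.  Updates are serialized
 * by afinfo_mutex; readers use RCU, so nf_unregister_afinfo() must wait
 * for a grace period before its caller may free the structure.
 */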
int nf_register_afinfo(const struct nf_afinfo *afinfo)
{
	mutex_lock(&afinfo_mutex);
	RCU_INIT_POINTER(nf_afinfo[afinfo->family], afinfo);
	mutex_unlock(&afinfo_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(nf_register_afinfo);

void nf_unregister_afinfo(const struct nf_afinfo *afinfo)
{
	mutex_lock(&afinfo_mutex);
	RCU_INIT_POINTER(nf_afinfo[afinfo->family], NULL);
	mutex_unlock(&afinfo_mutex);
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(nf_unregister_afinfo);

#ifdef HAVE_JUMP_LABEL
struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
EXPORT_SYMBOL(nf_hooks_needed);
#endif

static DEFINE_MUTEX(nf_hook_mutex);
#define nf_entry_dereference(e) \
	rcu_dereference_protected(e, lockdep_is_held(&nf_hook_mutex))

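/* Return the list head that a registration should be attached to: the
 * per-netns hook table for ordinary protocol families, or the device's
 * ingress list for NFPROTO_NETDEV/NF_NETDEV_INGRESS.  NULL means the
 * registration does not apply to this netns.
 */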
static struct nf_hook_entry __rcu **nf_hook_entry_head(struct net *net, const struct nf_hook_ops *reg)
{
	if (reg->pf != NFPROTO_NETDEV)
		return net->nf.hooks[reg->pf]+reg->hooknum;

#ifdef CONFIG_NETFILTER_INGRESS
	if (reg->hooknum == NF_NETDEV_INGRESS) {
		if (reg->dev && dev_net(reg->dev) == net)
			return &reg->dev->nf_hooks_ingress;
	}
#endif
	return NULL;
}

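/* Insert @reg into the hook list of @net, keeping entries sorted by
 * ascending priority.  The list stores a copy of the ops; the caller's
 * structure is only kept as a key (orig_ops) for later unregistration.
 */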
int nf_register_net_hook(struct net *net, const struct nf_hook_ops *reg)
{
	struct nf_hook_entry __rcu **pp;
	struct nf_hook_entry *entry, *p;

	if (reg->pf == NFPROTO_NETDEV) {
#ifndef CONFIG_NETFILTER_INGRESS
		if (reg->hooknum == NF_NETDEV_INGRESS)
			return -EOPNOTSUPP;
#endif
		if (reg->hooknum != NF_NETDEV_INGRESS ||
		    !reg->dev || dev_net(reg->dev) != net)
			return -EINVAL;
	}

	pp = nf_hook_entry_head(net, reg);
	if (!pp)
		return -EINVAL;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->orig_ops = reg;
	entry->ops = *reg;
	entry->next = NULL;

	mutex_lock(&nf_hook_mutex);

	/* Find the spot in the list */
	while ((p = nf_entry_dereference(*pp)) != NULL) {
		if (reg->priority < p->orig_ops->priority)
			break;
		pp = &p->next;
	}
	rcu_assign_pointer(entry->next, p);
	rcu_assign_pointer(*pp, entry);

	mutex_unlock(&nf_hook_mutex);
#ifdef CONFIG_NETFILTER_INGRESS
	if (reg->pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS)
		net_inc_ingress_queue();
#endif
#ifdef HAVE_JUMP_LABEL
	static_key_slow_inc(&nf_hooks_needed[reg->pf][reg->hooknum]);
#endif
	return 0;
}
EXPORT_SYMBOL(nf_register_net_hook);

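/* Remove the entry whose orig_ops matches @reg.  After unlinking we wait
 * for readers, drop packets still queued through this hook, then wait
 * again so that a late nfqueue verdict cannot touch the entry before it
 * is freed.
 */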
void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg)
{
	struct nf_hook_entry __rcu **pp;
	struct nf_hook_entry *p;

	pp = nf_hook_entry_head(net, reg);
	if (WARN_ON_ONCE(!pp))
		return;

	mutex_lock(&nf_hook_mutex);
	while ((p = nf_entry_dereference(*pp)) != NULL) {
		if (p->orig_ops == reg) {
			rcu_assign_pointer(*pp, p->next);
			break;
		}
		pp = &p->next;
	}
	mutex_unlock(&nf_hook_mutex);
	if (!p) {
		WARN(1, "nf_unregister_net_hook: hook not found!\n");
		return;
	}
#ifdef CONFIG_NETFILTER_INGRESS
	if (reg->pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS)
		net_dec_ingress_queue();
#endif
#ifdef HAVE_JUMP_LABEL
	static_key_slow_dec(&nf_hooks_needed[reg->pf][reg->hooknum]);
#endif
	synchronize_net();
	nf_queue_nf_hook_drop(net, p);
	/* other cpu might still process nfqueue verdict that used reg */
	synchronize_net();
	kfree(p);
}
EXPORT_SYMBOL(nf_unregister_net_hook);

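/* Register an array of @n hooks in @net; on failure the hooks that were
 * already registered are unwound before the error is returned.
 */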
int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg,
			  unsigned int n)
{
	unsigned int i;
	int err = 0;

	for (i = 0; i < n; i++) {
		err = nf_register_net_hook(net, &reg[i]);
		if (err)
			goto err;
	}
	return err;

err:
	if (i > 0)
		nf_unregister_net_hooks(net, reg, i);
	return err;
}
EXPORT_SYMBOL(nf_register_net_hooks);

void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg,
			     unsigned int n)
{
	while (n-- > 0)
		nf_unregister_net_hook(net, &reg[n]);
}
EXPORT_SYMBOL(nf_unregister_net_hooks);

static LIST_HEAD(nf_hook_list);

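/* The underscore-prefixed variants below expect the caller to already
 * hold rtnl_lock(): they register the hook in every existing network
 * namespace and keep it on nf_hook_list so that namespaces created later
 * pick it up in netfilter_net_init().
 */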
static int _nf_register_hook(struct nf_hook_ops *reg)
{
	struct net *net, *last;
	int ret;

	for_each_net(net) {
		ret = nf_register_net_hook(net, reg);
		if (ret && ret != -ENOENT)
			goto rollback;
	}
	list_add_tail(&reg->list, &nf_hook_list);

	return 0;
rollback:
	last = net;
	for_each_net(net) {
		if (net == last)
			break;
		nf_unregister_net_hook(net, reg);
	}
	return ret;
}

int nf_register_hook(struct nf_hook_ops *reg)
{
	int ret;

	rtnl_lock();
	ret = _nf_register_hook(reg);
	rtnl_unlock();

	return ret;
}
EXPORT_SYMBOL(nf_register_hook);

static void _nf_unregister_hook(struct nf_hook_ops *reg)
{
	struct net *net;

	list_del(&reg->list);
	for_each_net(net)
		nf_unregister_net_hook(net, reg);
}

void nf_unregister_hook(struct nf_hook_ops *reg)
{
	rtnl_lock();
	_nf_unregister_hook(reg);
	rtnl_unlock();
}
EXPORT_SYMBOL(nf_unregister_hook);

int nf_register_hooks(struct nf_hook_ops *reg, unsigned int n)
{
	unsigned int i;
	int err = 0;

	for (i = 0; i < n; i++) {
		err = nf_register_hook(&reg[i]);
		if (err)
			goto err;
	}
	return err;

err:
	if (i > 0)
		nf_unregister_hooks(reg, i);
	return err;
}
EXPORT_SYMBOL(nf_register_hooks);

/* Caller MUST take rtnl_lock() */
int _nf_register_hooks(struct nf_hook_ops *reg, unsigned int n)
{
	unsigned int i;
	int err = 0;

	for (i = 0; i < n; i++) {
		err = _nf_register_hook(&reg[i]);
		if (err)
			goto err;
	}
	return err;

err:
	if (i > 0)
		_nf_unregister_hooks(reg, i);
	return err;
}
EXPORT_SYMBOL(_nf_register_hooks);

void nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n)
{
	while (n-- > 0)
		nf_unregister_hook(&reg[n]);
}
EXPORT_SYMBOL(nf_unregister_hooks);

/* Caller MUST take rtnl_lock */
void _nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n)
{
	while (n-- > 0)
		_nf_unregister_hook(&reg[n]);
}
EXPORT_SYMBOL(_nf_unregister_hooks);

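/* Walk the hook list starting at *entryp and run each hook until one
 * returns a verdict other than NF_ACCEPT.  On return *entryp points at
 * the entry that produced that verdict, or NULL when the end of the list
 * was reached and NF_ACCEPT is returned.
 */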
unsigned int nf_iterate(struct sk_buff *skb,
			struct nf_hook_state *state,
			struct nf_hook_entry **entryp)
{
	unsigned int verdict;

	/*
	 * The caller must not block between calls to this
	 * function because of risk of continuing from deleted element.
	 */
	while (*entryp) {
		if (state->thresh > (*entryp)->ops.priority) {
			*entryp = rcu_dereference((*entryp)->next);
			continue;
		}

		/* Optimization: we don't need to hold module
		   reference here, since function can't sleep. --RR */
repeat:
		verdict = (*entryp)->ops.hook((*entryp)->ops.priv, skb, state);
		if (verdict != NF_ACCEPT) {
#ifdef CONFIG_NETFILTER_DEBUG
			if (unlikely((verdict & NF_VERDICT_MASK)
							> NF_MAX_VERDICT)) {
				NFDEBUG("Evil return from %p(%u).\n",
					(*entryp)->ops.hook, state->hook);
				*entryp = rcu_dereference((*entryp)->next);
				continue;
			}
#endif
			if (verdict != NF_REPEAT)
				return verdict;
			goto repeat;
		}
		*entryp = rcu_dereference((*entryp)->next);
	}
	return NF_ACCEPT;
}

/* Returns 1 if okfn() needs to be executed by the caller,
 * -EPERM for NF_DROP, 0 otherwise.  Caller must hold rcu_read_lock. */
int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state)
{
	struct nf_hook_entry *entry;
	unsigned int verdict;
	int ret = 0;

	entry = rcu_dereference(state->hook_entries);
next_hook:
	verdict = nf_iterate(skb, state, &entry);
	if (verdict == NF_ACCEPT || verdict == NF_STOP) {
		ret = 1;
	} else if ((verdict & NF_VERDICT_MASK) == NF_DROP) {
		kfree_skb(skb);
		ret = NF_DROP_GETERR(verdict);
		if (ret == 0)
			ret = -EPERM;
	} else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) {
		ret = nf_queue(skb, state, &entry, verdict);
		if (ret == 1 && entry)
			goto next_hook;
	} else {
		/* Implicit handling for NF_STOLEN, as well as any other
		 * non conventional verdicts.
		 */
		ret = 0;
	}
	return ret;
}
EXPORT_SYMBOL(nf_hook_slow);

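/* Returns 1 if the first @writable_len bytes of @skb may be modified in
 * place (pulling data into the linear area if necessary), 0 if that is
 * not possible, e.g. when @writable_len exceeds skb->len.
 */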
int skb_make_writable(struct sk_buff *skb, unsigned int writable_len)
{
	if (writable_len > skb->len)
		return 0;

	/* Not exclusive use of packet? Must copy. */
	if (!skb_cloned(skb)) {
		if (writable_len <= skb_headlen(skb))
			return 1;
	} else if (skb_clone_writable(skb, writable_len))
		return 1;

	if (writable_len <= skb_headlen(skb))
		writable_len = 0;
	else
		writable_len -= skb_headlen(skb);

	return !!__pskb_pull_tail(skb, writable_len);
}
EXPORT_SYMBOL(skb_make_writable);

/* This needs to be compiled in any case to avoid dependencies between the
 * nfnetlink_queue code and nf_conntrack.
 */
struct nfnl_ct_hook __rcu *nfnl_ct_hook __read_mostly;
EXPORT_SYMBOL_GPL(nfnl_ct_hook);

#if IS_ENABLED(CONFIG_NF_CONNTRACK)
/* This does not belong here, but locally generated errors need it if
 * connection tracking is in use: without this, the connection may not be
 * in the hash table, and hence manufactured ICMP or RST packets will not
 * be associated with it. */
void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *)
		__rcu __read_mostly;
EXPORT_SYMBOL(ip_ct_attach);

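/* Associate @new with the conntrack entry of @skb.  The real work is done
 * by the conntrack code through the ip_ct_attach pointer above; when no
 * handler is registered this is a no-op.
 */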
void nf_ct_attach(struct sk_buff *new, const struct sk_buff *skb)
{
	void (*attach)(struct sk_buff *, const struct sk_buff *);

	if (skb->nfct) {
		rcu_read_lock();
		attach = rcu_dereference(ip_ct_attach);
		if (attach)
			attach(new, skb);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(nf_ct_attach);

void (*nf_ct_destroy)(struct nf_conntrack *) __rcu __read_mostly;
EXPORT_SYMBOL(nf_ct_destroy);

void nf_conntrack_destroy(struct nf_conntrack *nfct)
{
	void (*destroy)(struct nf_conntrack *);

	rcu_read_lock();
	destroy = rcu_dereference(nf_ct_destroy);
	BUG_ON(destroy == NULL);
	destroy(nfct);
	rcu_read_unlock();
}
EXPORT_SYMBOL(nf_conntrack_destroy);

/* Built-in default zone used e.g. by modules. */
const struct nf_conntrack_zone nf_ct_zone_dflt = {
	.id	= NF_CT_DEFAULT_ZONE_ID,
	.dir	= NF_CT_DEFAULT_ZONE_DIR,
};
EXPORT_SYMBOL_GPL(nf_ct_zone_dflt);
#endif /* CONFIG_NF_CONNTRACK */

#ifdef CONFIG_NF_NAT_NEEDED
void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *);
EXPORT_SYMBOL(nf_nat_decode_session_hook);
#endif

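/* Called from the pernet init/exit path below: attach or detach every
 * hook on the global nf_hook_list for the given namespace, unwinding on
 * error.
 */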
static int nf_register_hook_list(struct net *net)
{
	struct nf_hook_ops *elem;
	int ret;

	rtnl_lock();
	list_for_each_entry(elem, &nf_hook_list, list) {
		ret = nf_register_net_hook(net, elem);
		if (ret && ret != -ENOENT)
			goto out_undo;
	}
	rtnl_unlock();
	return 0;

out_undo:
	list_for_each_entry_continue_reverse(elem, &nf_hook_list, list)
		nf_unregister_net_hook(net, elem);
	rtnl_unlock();
	return ret;
}

static void nf_unregister_hook_list(struct net *net)
{
	struct nf_hook_ops *elem;

	rtnl_lock();
	list_for_each_entry(elem, &nf_hook_list, list)
		nf_unregister_net_hook(net, elem);
	rtnl_unlock();
}

static int __net_init netfilter_net_init(struct net *net)
{
	int i, h, ret;

	for (i = 0; i < ARRAY_SIZE(net->nf.hooks); i++) {
		for (h = 0; h < NF_MAX_HOOKS; h++)
			RCU_INIT_POINTER(net->nf.hooks[i][h], NULL);
	}

#ifdef CONFIG_PROC_FS
	net->nf.proc_netfilter = proc_net_mkdir(net, "netfilter",
						net->proc_net);
	if (!net->nf.proc_netfilter) {
		if (!net_eq(net, &init_net))
			pr_err("cannot create netfilter proc entry");

		return -ENOMEM;
	}
#endif
	ret = nf_register_hook_list(net);
	if (ret)
		remove_proc_entry("netfilter", net->proc_net);

	return ret;
}

static void __net_exit netfilter_net_exit(struct net *net)
{
	nf_unregister_hook_list(net);
	remove_proc_entry("netfilter", net->proc_net);
}

static struct pernet_operations netfilter_net_ops = {
	.init = netfilter_net_init,
	.exit = netfilter_net_exit,
};

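/* Subsystem setup: register the per-netns operations above and bring up
 * the netfilter logging infrastructure, unwinding on failure.
 */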
int __init netfilter_init(void)
{
	int ret;

	ret = register_pernet_subsys(&netfilter_net_ops);
	if (ret < 0)
		goto err;

	ret = netfilter_log_init();
	if (ret < 0)
		goto err_pernet;

	return 0;
err_pernet:
	unregister_pernet_subsys(&netfilter_net_ops);
err:
	return ret;
}