blob: c9d90eb64046c518231f3fb20b4d380490857b47 [file] [log] [blame]
YOSHIFUJI Hideaki601e68e2007-02-12 11:15:49 -08001/* netfilter.c: look after the filters for various protocols.
Harald Weltef6ebe772005-08-09 20:21:49 -07002 * Heavily influenced by the old firewall.c by David Bonn and Alan Cox.
3 *
4 * Thanks to Rob `CmdrTaco' Malda for not influencing this code in any
5 * way.
6 *
7 * Rusty Russell (C)2000 -- This code is GPL.
Patrick McHardyf229f6c2013-04-06 15:24:29 +02008 * Patrick McHardy (c) 2006-2012
Harald Weltef6ebe772005-08-09 20:21:49 -07009 */
Harald Weltef6ebe772005-08-09 20:21:49 -070010#include <linux/kernel.h>
11#include <linux/netfilter.h>
12#include <net/protocol.h>
13#include <linux/init.h>
14#include <linux/skbuff.h>
15#include <linux/wait.h>
16#include <linux/module.h>
17#include <linux/interrupt.h>
18#include <linux/if.h>
19#include <linux/netdevice.h>
Florian Westphal56768642014-11-13 10:04:16 +010020#include <linux/netfilter_ipv6.h>
Harald Weltef6ebe772005-08-09 20:21:49 -070021#include <linux/inetdevice.h>
22#include <linux/proc_fs.h>
Patrick McHardyd486dd12007-02-12 11:09:55 -080023#include <linux/mutex.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090024#include <linux/slab.h>
Aaron Conolee3b37f12016-09-21 11:35:07 -040025#include <linux/rcupdate.h>
Eric W. Biederman457c4cb2007-09-12 12:01:34 +020026#include <net/net_namespace.h>
Harald Weltef6ebe772005-08-09 20:21:49 -070027#include <net/sock.h>
28
29#include "nf_internals.h"
30
/* Serializes writers of the nf_afinfo[] table below. */
static DEFINE_MUTEX(afinfo_mutex);

/* Per-family helper operations; read under RCU, written under afinfo_mutex. */
const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO] __read_mostly;
EXPORT_SYMBOL(nf_afinfo);
/* IPv6 helper operations; NOTE(review): installed by code outside this file. */
const struct nf_ipv6_ops __rcu *nf_ipv6_ops __read_mostly;
EXPORT_SYMBOL_GPL(nf_ipv6_ops);

/* Per-CPU flag; NOTE(review): readers/writers are outside this file —
 * presumably guards against recursive packet duplication; confirm at callers. */
DEFINE_PER_CPU(bool, nf_skb_duplicated);
EXPORT_SYMBOL_GPL(nf_skb_duplicated);
40
Patrick McHardy1e796fd2007-12-17 22:42:27 -080041int nf_register_afinfo(const struct nf_afinfo *afinfo)
Patrick McHardybce80322006-04-06 14:18:09 -070042{
Pablo Neira Ayuso7926dbf2014-07-31 20:38:46 +020043 mutex_lock(&afinfo_mutex);
Stephen Hemmingera9b3cd72011-08-01 16:19:00 +000044 RCU_INIT_POINTER(nf_afinfo[afinfo->family], afinfo);
Patrick McHardyd486dd12007-02-12 11:09:55 -080045 mutex_unlock(&afinfo_mutex);
Patrick McHardybce80322006-04-06 14:18:09 -070046 return 0;
47}
48EXPORT_SYMBOL_GPL(nf_register_afinfo);
49
Patrick McHardy1e796fd2007-12-17 22:42:27 -080050void nf_unregister_afinfo(const struct nf_afinfo *afinfo)
Patrick McHardybce80322006-04-06 14:18:09 -070051{
Patrick McHardyd486dd12007-02-12 11:09:55 -080052 mutex_lock(&afinfo_mutex);
Stephen Hemmingera9b3cd72011-08-01 16:19:00 +000053 RCU_INIT_POINTER(nf_afinfo[afinfo->family], NULL);
Patrick McHardyd486dd12007-02-12 11:09:55 -080054 mutex_unlock(&afinfo_mutex);
Patrick McHardybce80322006-04-06 14:18:09 -070055 synchronize_rcu();
56}
57EXPORT_SYMBOL_GPL(nf_unregister_afinfo);
58
#ifdef HAVE_JUMP_LABEL
/* Static keys letting the hook entry points skip empty chains cheaply;
 * bumped on every hook registration for (pf, hooknum). */
struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
EXPORT_SYMBOL(nf_hooks_needed);
#endif

/* Guards all mutations of the per-netns / per-device hook chains. */
static DEFINE_MUTEX(nf_hook_mutex);
/* Dereference a hook-chain pointer; legal only under nf_hook_mutex. */
#define nf_entry_dereference(e) \
	rcu_dereference_protected(e, lockdep_is_held(&nf_hook_mutex))
Harald Weltef6ebe772005-08-09 20:21:49 -070067
Aaron Conolee3b37f12016-09-21 11:35:07 -040068static struct nf_hook_entry *nf_hook_entry_head(struct net *net,
69 const struct nf_hook_ops *reg)
Eric W. Biederman0edcf282015-07-10 18:14:30 -050070{
Aaron Conolee3b37f12016-09-21 11:35:07 -040071 struct nf_hook_entry *hook_head = NULL;
Eric W. Biederman0edcf282015-07-10 18:14:30 -050072
73 if (reg->pf != NFPROTO_NETDEV)
Aaron Conolee3b37f12016-09-21 11:35:07 -040074 hook_head = nf_entry_dereference(net->nf.hooks[reg->pf]
75 [reg->hooknum]);
Eric W. Biederman0edcf282015-07-10 18:14:30 -050076 else if (reg->hooknum == NF_NETDEV_INGRESS) {
77#ifdef CONFIG_NETFILTER_INGRESS
Eric W. Biederman085db2c2015-07-10 18:15:06 -050078 if (reg->dev && dev_net(reg->dev) == net)
Aaron Conolee3b37f12016-09-21 11:35:07 -040079 hook_head =
80 nf_entry_dereference(
81 reg->dev->nf_hooks_ingress);
Eric W. Biederman0edcf282015-07-10 18:14:30 -050082#endif
83 }
Aaron Conolee3b37f12016-09-21 11:35:07 -040084 return hook_head;
Eric W. Biederman0edcf282015-07-10 18:14:30 -050085}
86
Aaron Conolee3b37f12016-09-21 11:35:07 -040087/* must hold nf_hook_mutex */
88static void nf_set_hooks_head(struct net *net, const struct nf_hook_ops *reg,
89 struct nf_hook_entry *entry)
90{
91 switch (reg->pf) {
92 case NFPROTO_NETDEV:
Aaron Conole7816ec52016-09-28 11:35:15 -040093#ifdef CONFIG_NETFILTER_INGRESS
Aaron Conolee3b37f12016-09-21 11:35:07 -040094 /* We already checked in nf_register_net_hook() that this is
95 * used from ingress.
96 */
97 rcu_assign_pointer(reg->dev->nf_hooks_ingress, entry);
Aaron Conole7816ec52016-09-28 11:35:15 -040098#endif
Aaron Conolee3b37f12016-09-21 11:35:07 -040099 break;
100 default:
101 rcu_assign_pointer(net->nf.hooks[reg->pf][reg->hooknum],
102 entry);
103 break;
104 }
105}
Pablo Neira Ayuso7181eba2015-07-20 09:31:25 +0200106
Eric W. Biederman085db2c2015-07-10 18:15:06 -0500107int nf_register_net_hook(struct net *net, const struct nf_hook_ops *reg)
Harald Weltef6ebe772005-08-09 20:21:49 -0700108{
Aaron Conolee3b37f12016-09-21 11:35:07 -0400109 struct nf_hook_entry *hooks_entry;
Pablo Neira Ayuso7181eba2015-07-20 09:31:25 +0200110 struct nf_hook_entry *entry;
Harald Weltef6ebe772005-08-09 20:21:49 -0700111
Aaron Conole7816ec52016-09-28 11:35:15 -0400112 if (reg->pf == NFPROTO_NETDEV) {
113#ifndef CONFIG_NETFILTER_INGRESS
114 if (reg->hooknum == NF_NETDEV_INGRESS)
115 return -EOPNOTSUPP;
116#endif
117 if (reg->hooknum != NF_NETDEV_INGRESS ||
118 !reg->dev || dev_net(reg->dev) != net)
119 return -EINVAL;
120 }
Aaron Conoled4bb5ca2016-09-21 11:35:05 -0400121
Pablo Neira Ayuso7181eba2015-07-20 09:31:25 +0200122 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
123 if (!entry)
Eric W. Biederman085db2c2015-07-10 18:15:06 -0500124 return -ENOMEM;
125
Pablo Neira Ayuso7181eba2015-07-20 09:31:25 +0200126 entry->orig_ops = reg;
127 entry->ops = *reg;
Aaron Conolee3b37f12016-09-21 11:35:07 -0400128 entry->next = NULL;
Pablo Neirae687ad62015-05-13 18:19:38 +0200129
Eric W. Biederman0edcf282015-07-10 18:14:30 -0500130 mutex_lock(&nf_hook_mutex);
Aaron Conolee3b37f12016-09-21 11:35:07 -0400131 hooks_entry = nf_hook_entry_head(net, reg);
132
133 if (hooks_entry && hooks_entry->orig_ops->priority > reg->priority) {
134 /* This is the case where we need to insert at the head */
135 entry->next = hooks_entry;
136 hooks_entry = NULL;
Harald Weltef6ebe772005-08-09 20:21:49 -0700137 }
Aaron Conolee3b37f12016-09-21 11:35:07 -0400138
139 while (hooks_entry &&
140 reg->priority >= hooks_entry->orig_ops->priority &&
141 nf_entry_dereference(hooks_entry->next)) {
142 hooks_entry = nf_entry_dereference(hooks_entry->next);
143 }
144
145 if (hooks_entry) {
146 entry->next = nf_entry_dereference(hooks_entry->next);
147 rcu_assign_pointer(hooks_entry->next, entry);
148 } else {
149 nf_set_hooks_head(net, reg, entry);
150 }
151
Patrick McHardyfd706d62007-02-12 11:10:14 -0800152 mutex_unlock(&nf_hook_mutex);
Eric W. Biederman4c091152015-07-10 18:13:58 -0500153#ifdef CONFIG_NETFILTER_INGRESS
154 if (reg->pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS)
155 net_inc_ingress_queue();
156#endif
Zhouyi Zhoud1c85c22014-08-22 10:40:15 +0800157#ifdef HAVE_JUMP_LABEL
Ingo Molnarc5905af2012-02-24 08:31:31 +0100158 static_key_slow_inc(&nf_hooks_needed[reg->pf][reg->hooknum]);
Eric Dumazeta2d7ec52011-11-18 17:32:46 +0000159#endif
Harald Weltef6ebe772005-08-09 20:21:49 -0700160 return 0;
161}
Eric W. Biederman085db2c2015-07-10 18:15:06 -0500162EXPORT_SYMBOL(nf_register_net_hook);
Harald Weltef6ebe772005-08-09 20:21:49 -0700163
Eric W. Biederman085db2c2015-07-10 18:15:06 -0500164void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg)
Harald Weltef6ebe772005-08-09 20:21:49 -0700165{
Aaron Conolee3b37f12016-09-21 11:35:07 -0400166 struct nf_hook_entry *hooks_entry;
Eric W. Biederman085db2c2015-07-10 18:15:06 -0500167
Patrick McHardyfd706d62007-02-12 11:10:14 -0800168 mutex_lock(&nf_hook_mutex);
Aaron Conolee3b37f12016-09-21 11:35:07 -0400169 hooks_entry = nf_hook_entry_head(net, reg);
Aaron Conole5119e432016-09-28 11:35:14 -0400170 if (hooks_entry && hooks_entry->orig_ops == reg) {
Aaron Conolee3b37f12016-09-21 11:35:07 -0400171 nf_set_hooks_head(net, reg,
172 nf_entry_dereference(hooks_entry->next));
173 goto unlock;
Eric W. Biederman085db2c2015-07-10 18:15:06 -0500174 }
Aaron Conolee3b37f12016-09-21 11:35:07 -0400175 while (hooks_entry && nf_entry_dereference(hooks_entry->next)) {
176 struct nf_hook_entry *next =
177 nf_entry_dereference(hooks_entry->next);
178 struct nf_hook_entry *nnext;
179
180 if (next->orig_ops != reg) {
181 hooks_entry = next;
182 continue;
183 }
184 nnext = nf_entry_dereference(next->next);
185 rcu_assign_pointer(hooks_entry->next, nnext);
186 hooks_entry = next;
187 break;
188 }
189
190unlock:
Patrick McHardyfd706d62007-02-12 11:10:14 -0800191 mutex_unlock(&nf_hook_mutex);
Aaron Conolee3b37f12016-09-21 11:35:07 -0400192 if (!hooks_entry) {
Eric W. Biederman085db2c2015-07-10 18:15:06 -0500193 WARN(1, "nf_unregister_net_hook: hook not found!\n");
194 return;
195 }
Pablo Neirae687ad62015-05-13 18:19:38 +0200196#ifdef CONFIG_NETFILTER_INGRESS
Eric W. Biederman4c091152015-07-10 18:13:58 -0500197 if (reg->pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS)
198 net_dec_ingress_queue();
Pablo Neirae687ad62015-05-13 18:19:38 +0200199#endif
Zhouyi Zhoud1c85c22014-08-22 10:40:15 +0800200#ifdef HAVE_JUMP_LABEL
Ingo Molnarc5905af2012-02-24 08:31:31 +0100201 static_key_slow_dec(&nf_hooks_needed[reg->pf][reg->hooknum]);
Eric Dumazeta2d7ec52011-11-18 17:32:46 +0000202#endif
Harald Weltef6ebe772005-08-09 20:21:49 -0700203 synchronize_net();
Aaron Conolee3b37f12016-09-21 11:35:07 -0400204 nf_queue_nf_hook_drop(net, hooks_entry);
Florian Westphal514ed622015-10-08 23:38:07 +0200205 /* other cpu might still process nfqueue verdict that used reg */
206 synchronize_net();
Aaron Conolee3b37f12016-09-21 11:35:07 -0400207 kfree(hooks_entry);
Eric W. Biederman085db2c2015-07-10 18:15:06 -0500208}
209EXPORT_SYMBOL(nf_unregister_net_hook);
210
211int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg,
212 unsigned int n)
213{
214 unsigned int i;
215 int err = 0;
216
217 for (i = 0; i < n; i++) {
218 err = nf_register_net_hook(net, &reg[i]);
219 if (err)
220 goto err;
221 }
222 return err;
223
224err:
225 if (i > 0)
226 nf_unregister_net_hooks(net, reg, i);
227 return err;
228}
229EXPORT_SYMBOL(nf_register_net_hooks);
230
231void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg,
232 unsigned int n)
233{
234 while (n-- > 0)
235 nf_unregister_net_hook(net, &reg[n]);
236}
237EXPORT_SYMBOL(nf_unregister_net_hooks);
238
/* Hooks registered via nf_register_hook(); replayed into each new netns
 * by nf_register_hook_list().  Protected by rtnl_lock. */
static LIST_HEAD(nf_hook_list);
240
Mahesh Bandeware8bffe02016-09-16 12:59:13 -0700241static int _nf_register_hook(struct nf_hook_ops *reg)
Eric W. Biederman085db2c2015-07-10 18:15:06 -0500242{
243 struct net *net, *last;
244 int ret;
245
Eric W. Biederman085db2c2015-07-10 18:15:06 -0500246 for_each_net(net) {
247 ret = nf_register_net_hook(net, reg);
248 if (ret && ret != -ENOENT)
249 goto rollback;
250 }
251 list_add_tail(&reg->list, &nf_hook_list);
Eric W. Biederman085db2c2015-07-10 18:15:06 -0500252
253 return 0;
254rollback:
255 last = net;
256 for_each_net(net) {
257 if (net == last)
258 break;
259 nf_unregister_net_hook(net, reg);
260 }
Mahesh Bandeware8bffe02016-09-16 12:59:13 -0700261 return ret;
262}
263
/* Public wrapper: register @reg in all namespaces under rtnl_lock. */
int nf_register_hook(struct nf_hook_ops *reg)
{
        int err;

        rtnl_lock();
        err = _nf_register_hook(reg);
        rtnl_unlock();

        return err;
}
EXPORT_SYMBOL(nf_register_hook);
275
Mahesh Bandeware8bffe02016-09-16 12:59:13 -0700276static void _nf_unregister_hook(struct nf_hook_ops *reg)
Eric W. Biederman085db2c2015-07-10 18:15:06 -0500277{
278 struct net *net;
279
Eric W. Biederman085db2c2015-07-10 18:15:06 -0500280 list_del(&reg->list);
281 for_each_net(net)
282 nf_unregister_net_hook(net, reg);
Mahesh Bandeware8bffe02016-09-16 12:59:13 -0700283}
284
/* Public wrapper: unregister @reg everywhere under rtnl_lock. */
void nf_unregister_hook(struct nf_hook_ops *reg)
{
        rtnl_lock();
        _nf_unregister_hook(reg);
        rtnl_unlock();
}
EXPORT_SYMBOL(nf_unregister_hook);
292
Patrick McHardy972d1cb2006-04-06 14:09:12 -0700293int nf_register_hooks(struct nf_hook_ops *reg, unsigned int n)
294{
295 unsigned int i;
296 int err = 0;
297
298 for (i = 0; i < n; i++) {
299 err = nf_register_hook(&reg[i]);
300 if (err)
301 goto err;
302 }
303 return err;
304
305err:
306 if (i > 0)
307 nf_unregister_hooks(reg, i);
308 return err;
309}
310EXPORT_SYMBOL(nf_register_hooks);
311
Mahesh Bandeware8bffe02016-09-16 12:59:13 -0700312/* Caller MUST take rtnl_lock() */
313int _nf_register_hooks(struct nf_hook_ops *reg, unsigned int n)
314{
315 unsigned int i;
316 int err = 0;
317
318 for (i = 0; i < n; i++) {
319 err = _nf_register_hook(&reg[i]);
320 if (err)
321 goto err;
322 }
323 return err;
324
325err:
326 if (i > 0)
327 _nf_unregister_hooks(reg, i);
328 return err;
329}
330EXPORT_SYMBOL(_nf_register_hooks);
331
Patrick McHardy972d1cb2006-04-06 14:09:12 -0700332void nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n)
333{
Changli Gaof68c5302010-10-04 22:24:12 +0200334 while (n-- > 0)
335 nf_unregister_hook(&reg[n]);
Patrick McHardy972d1cb2006-04-06 14:09:12 -0700336}
337EXPORT_SYMBOL(nf_unregister_hooks);
338
Mahesh Bandeware8bffe02016-09-16 12:59:13 -0700339/* Caller MUST take rtnl_lock */
340void _nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n)
341{
342 while (n-- > 0)
343 _nf_unregister_hook(&reg[n]);
344}
345EXPORT_SYMBOL(_nf_unregister_hooks);
346
/* Run the hooks on the RCU chain starting at *entryp until one returns
 * a verdict other than NF_ACCEPT.  On return, *entryp points at the hook
 * that produced the verdict (so NF_QUEUE can resume after it) or is NULL
 * when every hook accepted.  Caller must hold rcu_read_lock().
 */
unsigned int nf_iterate(struct sk_buff *skb,
			struct nf_hook_state *state,
			struct nf_hook_entry **entryp)
{
	unsigned int verdict;

	/*
	 * The caller must not block between calls to this
	 * function because of risk of continuing from deleted element.
	 */
	while (*entryp) {
		/* Skip hooks below the resume threshold (set when a queued
		 * packet is reinjected part-way down the chain). */
		if (state->thresh > (*entryp)->ops.priority) {
			*entryp = rcu_dereference((*entryp)->next);
			continue;
		}

		/* Optimization: we don't need to hold module
		   reference here, since function can't sleep. --RR */
repeat:
		verdict = (*entryp)->ops.hook((*entryp)->ops.priv, skb, state);
		if (verdict != NF_ACCEPT) {
#ifdef CONFIG_NETFILTER_DEBUG
			/* Out-of-range verdict: log and skip this hook. */
			if (unlikely((verdict & NF_VERDICT_MASK)
							> NF_MAX_VERDICT)) {
				NFDEBUG("Evil return from %p(%u).\n",
					(*entryp)->ops.hook, state->hook);
				*entryp = rcu_dereference((*entryp)->next);
				continue;
			}
#endif
			/* NF_REPEAT re-runs the same hook; anything else
			 * terminates the traversal. */
			if (verdict != NF_REPEAT)
				return verdict;
			goto repeat;
		}
		*entryp = rcu_dereference((*entryp)->next);
	}
	return NF_ACCEPT;
}
385
386
/* Returns 1 if okfn() needs to be executed by the caller,
 * -EPERM for NF_DROP, 0 otherwise. Caller must hold rcu_read_lock. */
int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state)
{
	struct nf_hook_entry *entry;
	unsigned int verdict;
	int ret = 0;

	entry = rcu_dereference(state->hook_entries);
next_hook:
	verdict = nf_iterate(skb, state, &entry);
	if (verdict == NF_ACCEPT || verdict == NF_STOP) {
		ret = 1;
	} else if ((verdict & NF_VERDICT_MASK) == NF_DROP) {
		kfree_skb(skb);
		/* Hooks may encode an errno in the upper verdict bits. */
		ret = NF_DROP_GETERR(verdict);
		if (ret == 0)
			ret = -EPERM;
	} else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) {
		int err;

		/* Remember where to resume once userspace reinjects. */
		RCU_INIT_POINTER(state->hook_entries, entry);
		err = nf_queue(skb, state, verdict >> NF_VERDICT_QBITS);
		if (err < 0) {
			/* No queue listener: either continue down the
			 * chain (bypass flag) or drop the packet. */
			if (err == -ESRCH &&
			   (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
				goto next_hook;
			kfree_skb(skb);
		}
	}
	return ret;
}
EXPORT_SYMBOL(nf_hook_slow);
420
421
Herbert Xu37d41872007-10-14 00:39:18 -0700422int skb_make_writable(struct sk_buff *skb, unsigned int writable_len)
Harald Weltef6ebe772005-08-09 20:21:49 -0700423{
Herbert Xu37d41872007-10-14 00:39:18 -0700424 if (writable_len > skb->len)
Harald Weltef6ebe772005-08-09 20:21:49 -0700425 return 0;
426
427 /* Not exclusive use of packet? Must copy. */
Herbert Xu37d41872007-10-14 00:39:18 -0700428 if (!skb_cloned(skb)) {
429 if (writable_len <= skb_headlen(skb))
430 return 1;
431 } else if (skb_clone_writable(skb, writable_len))
432 return 1;
Harald Weltef6ebe772005-08-09 20:21:49 -0700433
Herbert Xu37d41872007-10-14 00:39:18 -0700434 if (writable_len <= skb_headlen(skb))
435 writable_len = 0;
436 else
437 writable_len -= skb_headlen(skb);
Harald Weltef6ebe772005-08-09 20:21:49 -0700438
Herbert Xu37d41872007-10-14 00:39:18 -0700439 return !!__pskb_pull_tail(skb, writable_len);
Harald Weltef6ebe772005-08-09 20:21:49 -0700440}
441EXPORT_SYMBOL(skb_make_writable);
442
/* This needs to be compiled in any case to avoid dependencies between the
 * nfnetlink_queue code and nf_conntrack.
 */
struct nfnl_ct_hook __rcu *nfnl_ct_hook __read_mostly;
EXPORT_SYMBOL_GPL(nfnl_ct_hook);
Pablo Neira Ayusob7bd1802015-09-30 22:53:44 +0100448
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
/* This does not belong here, but locally generated errors need it if connection
   tracking in use: without this, connection may not be in hash table, and hence
   manufactured ICMP or RST packets will not be associated with it. */
/* NOTE(review): the pointer is installed by the conntrack module at load. */
void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *)
		__rcu __read_mostly;
EXPORT_SYMBOL(ip_ct_attach);
456
Patrick McHardy312a0c162013-07-28 22:54:08 +0200457void nf_ct_attach(struct sk_buff *new, const struct sk_buff *skb)
Harald Weltef6ebe772005-08-09 20:21:49 -0700458{
Patrick McHardy312a0c162013-07-28 22:54:08 +0200459 void (*attach)(struct sk_buff *, const struct sk_buff *);
Harald Weltef6ebe772005-08-09 20:21:49 -0700460
Patrick McHardyc3a47ab2007-02-12 11:09:19 -0800461 if (skb->nfct) {
462 rcu_read_lock();
463 attach = rcu_dereference(ip_ct_attach);
464 if (attach)
465 attach(new, skb);
466 rcu_read_unlock();
Harald Weltef6ebe772005-08-09 20:21:49 -0700467 }
468}
469EXPORT_SYMBOL(nf_ct_attach);
Yasuyuki Kozakaide6e05c2007-03-23 11:17:27 -0700470
/* Conntrack destructor; NOTE(review): installed by the conntrack module. */
void (*nf_ct_destroy)(struct nf_conntrack *) __rcu __read_mostly;
EXPORT_SYMBOL(nf_ct_destroy);
473
474void nf_conntrack_destroy(struct nf_conntrack *nfct)
475{
476 void (*destroy)(struct nf_conntrack *);
477
478 rcu_read_lock();
479 destroy = rcu_dereference(nf_ct_destroy);
480 BUG_ON(destroy == NULL);
481 destroy(nfct);
482 rcu_read_unlock();
483}
484EXPORT_SYMBOL(nf_conntrack_destroy);
Pablo Neira Ayuso9cb01762012-06-07 12:13:39 +0200485
/* Built-in default zone used e.g. by modules. */
const struct nf_conntrack_zone nf_ct_zone_dflt = {
	.id	= NF_CT_DEFAULT_ZONE_ID,
	.dir	= NF_CT_DEFAULT_ZONE_DIR,
};
EXPORT_SYMBOL_GPL(nf_ct_zone_dflt);
#endif /* CONFIG_NF_CONNTRACK */
Harald Weltef6ebe772005-08-09 20:21:49 -0700493
#ifdef CONFIG_NF_NAT_NEEDED
/* NOTE(review): set by the NAT core (outside this file); presumably rewrites
 * flow keys when decoding sessions for policy routing — confirm at the setter. */
void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *);
EXPORT_SYMBOL(nf_nat_decode_session_hook);
#endif
498
Eric W. Biederman085db2c2015-07-10 18:15:06 -0500499static int nf_register_hook_list(struct net *net)
500{
501 struct nf_hook_ops *elem;
502 int ret;
503
504 rtnl_lock();
505 list_for_each_entry(elem, &nf_hook_list, list) {
506 ret = nf_register_net_hook(net, elem);
507 if (ret && ret != -ENOENT)
508 goto out_undo;
509 }
510 rtnl_unlock();
511 return 0;
512
513out_undo:
514 list_for_each_entry_continue_reverse(elem, &nf_hook_list, list)
515 nf_unregister_net_hook(net, elem);
516 rtnl_unlock();
517 return ret;
518}
519
520static void nf_unregister_hook_list(struct net *net)
521{
522 struct nf_hook_ops *elem;
523
524 rtnl_lock();
525 list_for_each_entry(elem, &nf_hook_list, list)
526 nf_unregister_net_hook(net, elem);
527 rtnl_unlock();
528}
529
Gao fengf3c1a442013-03-24 23:50:39 +0000530static int __net_init netfilter_net_init(struct net *net)
531{
Eric W. Biederman085db2c2015-07-10 18:15:06 -0500532 int i, h, ret;
533
534 for (i = 0; i < ARRAY_SIZE(net->nf.hooks); i++) {
535 for (h = 0; h < NF_MAX_HOOKS; h++)
Aaron Conolee3b37f12016-09-21 11:35:07 -0400536 RCU_INIT_POINTER(net->nf.hooks[i][h], NULL);
Eric W. Biederman085db2c2015-07-10 18:15:06 -0500537 }
538
Gao fengf3c1a442013-03-24 23:50:39 +0000539#ifdef CONFIG_PROC_FS
540 net->nf.proc_netfilter = proc_net_mkdir(net, "netfilter",
541 net->proc_net);
Pablo Neira Ayuso12202fa2013-04-05 19:40:10 +0200542 if (!net->nf.proc_netfilter) {
543 if (!net_eq(net, &init_net))
544 pr_err("cannot create netfilter proc entry");
545
Gao fengf3c1a442013-03-24 23:50:39 +0000546 return -ENOMEM;
547 }
548#endif
Eric W. Biederman085db2c2015-07-10 18:15:06 -0500549 ret = nf_register_hook_list(net);
550 if (ret)
551 remove_proc_entry("netfilter", net->proc_net);
552
553 return ret;
Gao fengf3c1a442013-03-24 23:50:39 +0000554}
555
556static void __net_exit netfilter_net_exit(struct net *net)
557{
Eric W. Biederman085db2c2015-07-10 18:15:06 -0500558 nf_unregister_hook_list(net);
Gao fengf3c1a442013-03-24 23:50:39 +0000559 remove_proc_entry("netfilter", net->proc_net);
560}
561
/* Per-network-namespace setup/teardown for the netfilter core. */
static struct pernet_operations netfilter_net_ops = {
	.init = netfilter_net_init,
	.exit = netfilter_net_exit,
};
566
Pablo Neira Ayuso6d11cfd2013-05-22 22:42:36 +0000567int __init netfilter_init(void)
Harald Weltef6ebe772005-08-09 20:21:49 -0700568{
Eric W. Biederman085db2c2015-07-10 18:15:06 -0500569 int ret;
Harald Weltef6ebe772005-08-09 20:21:49 -0700570
Pablo Neira Ayuso6d11cfd2013-05-22 22:42:36 +0000571 ret = register_pernet_subsys(&netfilter_net_ops);
572 if (ret < 0)
573 goto err;
Harald Weltef6ebe772005-08-09 20:21:49 -0700574
Pablo Neira Ayuso6d11cfd2013-05-22 22:42:36 +0000575 ret = netfilter_log_init();
576 if (ret < 0)
577 goto err_pernet;
578
579 return 0;
580err_pernet:
581 unregister_pernet_subsys(&netfilter_net_ops);
582err:
583 return ret;
Harald Weltef6ebe772005-08-09 20:21:49 -0700584}