YOSHIFUJI Hideaki | 601e68e | 2007-02-12 11:15:49 -0800 | [diff] [blame] | 1 | /* netfilter.c: look after the filters for various protocols. |
Harald Welte | f6ebe77 | 2005-08-09 20:21:49 -0700 | [diff] [blame] | 2 | * Heavily influenced by the old firewall.c by David Bonn and Alan Cox. |
| 3 | * |
| 4 | * Thanks to Rob `CmdrTaco' Malda for not influencing this code in any |
| 5 | * way. |
| 6 | * |
| 7 | * Rusty Russell (C)2000 -- This code is GPL. |
Patrick McHardy | f229f6c | 2013-04-06 15:24:29 +0200 | [diff] [blame] | 8 | * Patrick McHardy (c) 2006-2012 |
Harald Welte | f6ebe77 | 2005-08-09 20:21:49 -0700 | [diff] [blame] | 9 | */ |
Harald Welte | f6ebe77 | 2005-08-09 20:21:49 -0700 | [diff] [blame] | 10 | #include <linux/kernel.h> |
| 11 | #include <linux/netfilter.h> |
| 12 | #include <net/protocol.h> |
| 13 | #include <linux/init.h> |
| 14 | #include <linux/skbuff.h> |
| 15 | #include <linux/wait.h> |
| 16 | #include <linux/module.h> |
| 17 | #include <linux/interrupt.h> |
| 18 | #include <linux/if.h> |
| 19 | #include <linux/netdevice.h> |
Florian Westphal | 5676864 | 2014-11-13 10:04:16 +0100 | [diff] [blame] | 20 | #include <linux/netfilter_ipv6.h> |
Harald Welte | f6ebe77 | 2005-08-09 20:21:49 -0700 | [diff] [blame] | 21 | #include <linux/inetdevice.h> |
| 22 | #include <linux/proc_fs.h> |
Patrick McHardy | d486dd1 | 2007-02-12 11:09:55 -0800 | [diff] [blame] | 23 | #include <linux/mutex.h> |
Tejun Heo | 5a0e3ad | 2010-03-24 17:04:11 +0900 | [diff] [blame] | 24 | #include <linux/slab.h> |
Aaron Conole | e3b37f1 | 2016-09-21 11:35:07 -0400 | [diff] [blame] | 25 | #include <linux/rcupdate.h> |
Eric W. Biederman | 457c4cb | 2007-09-12 12:01:34 +0200 | [diff] [blame] | 26 | #include <net/net_namespace.h> |
Harald Welte | f6ebe77 | 2005-08-09 20:21:49 -0700 | [diff] [blame] | 27 | #include <net/sock.h> |
| 28 | |
| 29 | #include "nf_internals.h" |
| 30 | |
Patrick McHardy | d486dd1 | 2007-02-12 11:09:55 -0800 | [diff] [blame] | 31 | static DEFINE_MUTEX(afinfo_mutex); |
Patrick McHardy | bce8032 | 2006-04-06 14:18:09 -0700 | [diff] [blame] | 32 | |
Arnd Bergmann | 0906a37 | 2010-03-09 20:59:15 +0100 | [diff] [blame] | 33 | const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO] __read_mostly; |
Patrick McHardy | bce8032 | 2006-04-06 14:18:09 -0700 | [diff] [blame] | 34 | EXPORT_SYMBOL(nf_afinfo); |
Florian Westphal | 2a7851b | 2013-05-17 03:56:10 +0000 | [diff] [blame] | 35 | const struct nf_ipv6_ops __rcu *nf_ipv6_ops __read_mostly; |
| 36 | EXPORT_SYMBOL_GPL(nf_ipv6_ops); |
Patrick McHardy | bce8032 | 2006-04-06 14:18:09 -0700 | [diff] [blame] | 37 | |
Florian Westphal | e7c8899 | 2015-07-14 17:51:07 +0200 | [diff] [blame] | 38 | DEFINE_PER_CPU(bool, nf_skb_duplicated); |
| 39 | EXPORT_SYMBOL_GPL(nf_skb_duplicated); |
| 40 | |
/* Register the per-family (nf_afinfo) helper ops table.
 * The slot in nf_afinfo[] is written under afinfo_mutex; readers access it
 * via RCU.  RCU_INIT_POINTER() (no write barrier) is used for the publish —
 * NOTE(review): this presumes @afinfo is fully initialized by the caller
 * before registration; confirm at call sites.
 * Always returns 0.
 */
int nf_register_afinfo(const struct nf_afinfo *afinfo)
{
	mutex_lock(&afinfo_mutex);
	RCU_INIT_POINTER(nf_afinfo[afinfo->family], afinfo);
	mutex_unlock(&afinfo_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(nf_register_afinfo);
| 49 | |
/* Unregister the per-family helper ops table.
 * Clears the slot under afinfo_mutex, then waits for in-flight RCU readers
 * with synchronize_rcu() so the caller may safely free @afinfo afterwards.
 */
void nf_unregister_afinfo(const struct nf_afinfo *afinfo)
{
	mutex_lock(&afinfo_mutex);
	RCU_INIT_POINTER(nf_afinfo[afinfo->family], NULL);
	mutex_unlock(&afinfo_mutex);
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(nf_unregister_afinfo);
| 58 | |
Zhouyi Zhou | d1c85c2 | 2014-08-22 10:40:15 +0800 | [diff] [blame] | 59 | #ifdef HAVE_JUMP_LABEL |
Ingo Molnar | c5905af | 2012-02-24 08:31:31 +0100 | [diff] [blame] | 60 | struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; |
Eric Dumazet | a2d7ec5 | 2011-11-18 17:32:46 +0000 | [diff] [blame] | 61 | EXPORT_SYMBOL(nf_hooks_needed); |
| 62 | #endif |
| 63 | |
Patrick McHardy | fd706d6 | 2007-02-12 11:10:14 -0800 | [diff] [blame] | 64 | static DEFINE_MUTEX(nf_hook_mutex); |
Aaron Conole | e3b37f1 | 2016-09-21 11:35:07 -0400 | [diff] [blame] | 65 | #define nf_entry_dereference(e) \ |
| 66 | rcu_dereference_protected(e, lockdep_is_held(&nf_hook_mutex)) |
Harald Welte | f6ebe77 | 2005-08-09 20:21:49 -0700 | [diff] [blame] | 67 | |
Aaron Conole | e3b37f1 | 2016-09-21 11:35:07 -0400 | [diff] [blame] | 68 | static struct nf_hook_entry *nf_hook_entry_head(struct net *net, |
| 69 | const struct nf_hook_ops *reg) |
Eric W. Biederman | 0edcf28 | 2015-07-10 18:14:30 -0500 | [diff] [blame] | 70 | { |
Aaron Conole | e3b37f1 | 2016-09-21 11:35:07 -0400 | [diff] [blame] | 71 | struct nf_hook_entry *hook_head = NULL; |
Eric W. Biederman | 0edcf28 | 2015-07-10 18:14:30 -0500 | [diff] [blame] | 72 | |
| 73 | if (reg->pf != NFPROTO_NETDEV) |
Aaron Conole | e3b37f1 | 2016-09-21 11:35:07 -0400 | [diff] [blame] | 74 | hook_head = nf_entry_dereference(net->nf.hooks[reg->pf] |
| 75 | [reg->hooknum]); |
Eric W. Biederman | 0edcf28 | 2015-07-10 18:14:30 -0500 | [diff] [blame] | 76 | else if (reg->hooknum == NF_NETDEV_INGRESS) { |
| 77 | #ifdef CONFIG_NETFILTER_INGRESS |
Eric W. Biederman | 085db2c | 2015-07-10 18:15:06 -0500 | [diff] [blame] | 78 | if (reg->dev && dev_net(reg->dev) == net) |
Aaron Conole | e3b37f1 | 2016-09-21 11:35:07 -0400 | [diff] [blame] | 79 | hook_head = |
| 80 | nf_entry_dereference( |
| 81 | reg->dev->nf_hooks_ingress); |
Eric W. Biederman | 0edcf28 | 2015-07-10 18:14:30 -0500 | [diff] [blame] | 82 | #endif |
| 83 | } |
Aaron Conole | e3b37f1 | 2016-09-21 11:35:07 -0400 | [diff] [blame] | 84 | return hook_head; |
Eric W. Biederman | 0edcf28 | 2015-07-10 18:14:30 -0500 | [diff] [blame] | 85 | } |
| 86 | |
Aaron Conole | e3b37f1 | 2016-09-21 11:35:07 -0400 | [diff] [blame] | 87 | /* must hold nf_hook_mutex */ |
| 88 | static void nf_set_hooks_head(struct net *net, const struct nf_hook_ops *reg, |
| 89 | struct nf_hook_entry *entry) |
| 90 | { |
| 91 | switch (reg->pf) { |
| 92 | case NFPROTO_NETDEV: |
Aaron Conole | 7816ec5 | 2016-09-28 11:35:15 -0400 | [diff] [blame] | 93 | #ifdef CONFIG_NETFILTER_INGRESS |
Aaron Conole | e3b37f1 | 2016-09-21 11:35:07 -0400 | [diff] [blame] | 94 | /* We already checked in nf_register_net_hook() that this is |
| 95 | * used from ingress. |
| 96 | */ |
| 97 | rcu_assign_pointer(reg->dev->nf_hooks_ingress, entry); |
Aaron Conole | 7816ec5 | 2016-09-28 11:35:15 -0400 | [diff] [blame] | 98 | #endif |
Aaron Conole | e3b37f1 | 2016-09-21 11:35:07 -0400 | [diff] [blame] | 99 | break; |
| 100 | default: |
| 101 | rcu_assign_pointer(net->nf.hooks[reg->pf][reg->hooknum], |
| 102 | entry); |
| 103 | break; |
| 104 | } |
| 105 | } |
Pablo Neira Ayuso | 7181eba | 2015-07-20 09:31:25 +0200 | [diff] [blame] | 106 | |
Eric W. Biederman | 085db2c | 2015-07-10 18:15:06 -0500 | [diff] [blame] | 107 | int nf_register_net_hook(struct net *net, const struct nf_hook_ops *reg) |
Harald Welte | f6ebe77 | 2005-08-09 20:21:49 -0700 | [diff] [blame] | 108 | { |
Aaron Conole | e3b37f1 | 2016-09-21 11:35:07 -0400 | [diff] [blame] | 109 | struct nf_hook_entry *hooks_entry; |
Pablo Neira Ayuso | 7181eba | 2015-07-20 09:31:25 +0200 | [diff] [blame] | 110 | struct nf_hook_entry *entry; |
Harald Welte | f6ebe77 | 2005-08-09 20:21:49 -0700 | [diff] [blame] | 111 | |
Aaron Conole | 7816ec5 | 2016-09-28 11:35:15 -0400 | [diff] [blame] | 112 | if (reg->pf == NFPROTO_NETDEV) { |
| 113 | #ifndef CONFIG_NETFILTER_INGRESS |
| 114 | if (reg->hooknum == NF_NETDEV_INGRESS) |
| 115 | return -EOPNOTSUPP; |
| 116 | #endif |
| 117 | if (reg->hooknum != NF_NETDEV_INGRESS || |
| 118 | !reg->dev || dev_net(reg->dev) != net) |
| 119 | return -EINVAL; |
| 120 | } |
Aaron Conole | d4bb5ca | 2016-09-21 11:35:05 -0400 | [diff] [blame] | 121 | |
Pablo Neira Ayuso | 7181eba | 2015-07-20 09:31:25 +0200 | [diff] [blame] | 122 | entry = kmalloc(sizeof(*entry), GFP_KERNEL); |
| 123 | if (!entry) |
Eric W. Biederman | 085db2c | 2015-07-10 18:15:06 -0500 | [diff] [blame] | 124 | return -ENOMEM; |
| 125 | |
Pablo Neira Ayuso | 7181eba | 2015-07-20 09:31:25 +0200 | [diff] [blame] | 126 | entry->orig_ops = reg; |
| 127 | entry->ops = *reg; |
Aaron Conole | e3b37f1 | 2016-09-21 11:35:07 -0400 | [diff] [blame] | 128 | entry->next = NULL; |
Pablo Neira | e687ad6 | 2015-05-13 18:19:38 +0200 | [diff] [blame] | 129 | |
Eric W. Biederman | 0edcf28 | 2015-07-10 18:14:30 -0500 | [diff] [blame] | 130 | mutex_lock(&nf_hook_mutex); |
Aaron Conole | e3b37f1 | 2016-09-21 11:35:07 -0400 | [diff] [blame] | 131 | hooks_entry = nf_hook_entry_head(net, reg); |
| 132 | |
| 133 | if (hooks_entry && hooks_entry->orig_ops->priority > reg->priority) { |
| 134 | /* This is the case where we need to insert at the head */ |
| 135 | entry->next = hooks_entry; |
| 136 | hooks_entry = NULL; |
Harald Welte | f6ebe77 | 2005-08-09 20:21:49 -0700 | [diff] [blame] | 137 | } |
Aaron Conole | e3b37f1 | 2016-09-21 11:35:07 -0400 | [diff] [blame] | 138 | |
| 139 | while (hooks_entry && |
| 140 | reg->priority >= hooks_entry->orig_ops->priority && |
| 141 | nf_entry_dereference(hooks_entry->next)) { |
| 142 | hooks_entry = nf_entry_dereference(hooks_entry->next); |
| 143 | } |
| 144 | |
| 145 | if (hooks_entry) { |
| 146 | entry->next = nf_entry_dereference(hooks_entry->next); |
| 147 | rcu_assign_pointer(hooks_entry->next, entry); |
| 148 | } else { |
| 149 | nf_set_hooks_head(net, reg, entry); |
| 150 | } |
| 151 | |
Patrick McHardy | fd706d6 | 2007-02-12 11:10:14 -0800 | [diff] [blame] | 152 | mutex_unlock(&nf_hook_mutex); |
Eric W. Biederman | 4c09115 | 2015-07-10 18:13:58 -0500 | [diff] [blame] | 153 | #ifdef CONFIG_NETFILTER_INGRESS |
| 154 | if (reg->pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS) |
| 155 | net_inc_ingress_queue(); |
| 156 | #endif |
Zhouyi Zhou | d1c85c2 | 2014-08-22 10:40:15 +0800 | [diff] [blame] | 157 | #ifdef HAVE_JUMP_LABEL |
Ingo Molnar | c5905af | 2012-02-24 08:31:31 +0100 | [diff] [blame] | 158 | static_key_slow_inc(&nf_hooks_needed[reg->pf][reg->hooknum]); |
Eric Dumazet | a2d7ec5 | 2011-11-18 17:32:46 +0000 | [diff] [blame] | 159 | #endif |
Harald Welte | f6ebe77 | 2005-08-09 20:21:49 -0700 | [diff] [blame] | 160 | return 0; |
| 161 | } |
Eric W. Biederman | 085db2c | 2015-07-10 18:15:06 -0500 | [diff] [blame] | 162 | EXPORT_SYMBOL(nf_register_net_hook); |
Harald Welte | f6ebe77 | 2005-08-09 20:21:49 -0700 | [diff] [blame] | 163 | |
Eric W. Biederman | 085db2c | 2015-07-10 18:15:06 -0500 | [diff] [blame] | 164 | void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg) |
Harald Welte | f6ebe77 | 2005-08-09 20:21:49 -0700 | [diff] [blame] | 165 | { |
Aaron Conole | e3b37f1 | 2016-09-21 11:35:07 -0400 | [diff] [blame] | 166 | struct nf_hook_entry *hooks_entry; |
Eric W. Biederman | 085db2c | 2015-07-10 18:15:06 -0500 | [diff] [blame] | 167 | |
Patrick McHardy | fd706d6 | 2007-02-12 11:10:14 -0800 | [diff] [blame] | 168 | mutex_lock(&nf_hook_mutex); |
Aaron Conole | e3b37f1 | 2016-09-21 11:35:07 -0400 | [diff] [blame] | 169 | hooks_entry = nf_hook_entry_head(net, reg); |
Aaron Conole | 5119e43 | 2016-09-28 11:35:14 -0400 | [diff] [blame] | 170 | if (hooks_entry && hooks_entry->orig_ops == reg) { |
Aaron Conole | e3b37f1 | 2016-09-21 11:35:07 -0400 | [diff] [blame] | 171 | nf_set_hooks_head(net, reg, |
| 172 | nf_entry_dereference(hooks_entry->next)); |
| 173 | goto unlock; |
Eric W. Biederman | 085db2c | 2015-07-10 18:15:06 -0500 | [diff] [blame] | 174 | } |
Aaron Conole | e3b37f1 | 2016-09-21 11:35:07 -0400 | [diff] [blame] | 175 | while (hooks_entry && nf_entry_dereference(hooks_entry->next)) { |
| 176 | struct nf_hook_entry *next = |
| 177 | nf_entry_dereference(hooks_entry->next); |
| 178 | struct nf_hook_entry *nnext; |
| 179 | |
| 180 | if (next->orig_ops != reg) { |
| 181 | hooks_entry = next; |
| 182 | continue; |
| 183 | } |
| 184 | nnext = nf_entry_dereference(next->next); |
| 185 | rcu_assign_pointer(hooks_entry->next, nnext); |
| 186 | hooks_entry = next; |
| 187 | break; |
| 188 | } |
| 189 | |
| 190 | unlock: |
Patrick McHardy | fd706d6 | 2007-02-12 11:10:14 -0800 | [diff] [blame] | 191 | mutex_unlock(&nf_hook_mutex); |
Aaron Conole | e3b37f1 | 2016-09-21 11:35:07 -0400 | [diff] [blame] | 192 | if (!hooks_entry) { |
Eric W. Biederman | 085db2c | 2015-07-10 18:15:06 -0500 | [diff] [blame] | 193 | WARN(1, "nf_unregister_net_hook: hook not found!\n"); |
| 194 | return; |
| 195 | } |
Pablo Neira | e687ad6 | 2015-05-13 18:19:38 +0200 | [diff] [blame] | 196 | #ifdef CONFIG_NETFILTER_INGRESS |
Eric W. Biederman | 4c09115 | 2015-07-10 18:13:58 -0500 | [diff] [blame] | 197 | if (reg->pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS) |
| 198 | net_dec_ingress_queue(); |
Pablo Neira | e687ad6 | 2015-05-13 18:19:38 +0200 | [diff] [blame] | 199 | #endif |
Zhouyi Zhou | d1c85c2 | 2014-08-22 10:40:15 +0800 | [diff] [blame] | 200 | #ifdef HAVE_JUMP_LABEL |
Ingo Molnar | c5905af | 2012-02-24 08:31:31 +0100 | [diff] [blame] | 201 | static_key_slow_dec(&nf_hooks_needed[reg->pf][reg->hooknum]); |
Eric Dumazet | a2d7ec5 | 2011-11-18 17:32:46 +0000 | [diff] [blame] | 202 | #endif |
Harald Welte | f6ebe77 | 2005-08-09 20:21:49 -0700 | [diff] [blame] | 203 | synchronize_net(); |
Aaron Conole | e3b37f1 | 2016-09-21 11:35:07 -0400 | [diff] [blame] | 204 | nf_queue_nf_hook_drop(net, hooks_entry); |
Florian Westphal | 514ed62 | 2015-10-08 23:38:07 +0200 | [diff] [blame] | 205 | /* other cpu might still process nfqueue verdict that used reg */ |
| 206 | synchronize_net(); |
Aaron Conole | e3b37f1 | 2016-09-21 11:35:07 -0400 | [diff] [blame] | 207 | kfree(hooks_entry); |
Eric W. Biederman | 085db2c | 2015-07-10 18:15:06 -0500 | [diff] [blame] | 208 | } |
| 209 | EXPORT_SYMBOL(nf_unregister_net_hook); |
| 210 | |
| 211 | int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg, |
| 212 | unsigned int n) |
| 213 | { |
| 214 | unsigned int i; |
| 215 | int err = 0; |
| 216 | |
| 217 | for (i = 0; i < n; i++) { |
| 218 | err = nf_register_net_hook(net, ®[i]); |
| 219 | if (err) |
| 220 | goto err; |
| 221 | } |
| 222 | return err; |
| 223 | |
| 224 | err: |
| 225 | if (i > 0) |
| 226 | nf_unregister_net_hooks(net, reg, i); |
| 227 | return err; |
| 228 | } |
| 229 | EXPORT_SYMBOL(nf_register_net_hooks); |
| 230 | |
| 231 | void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg, |
| 232 | unsigned int n) |
| 233 | { |
| 234 | while (n-- > 0) |
| 235 | nf_unregister_net_hook(net, ®[n]); |
| 236 | } |
| 237 | EXPORT_SYMBOL(nf_unregister_net_hooks); |
| 238 | |
| 239 | static LIST_HEAD(nf_hook_list); |
| 240 | |
/* Register @reg in every existing network namespace and remember it on
 * nf_hook_list so namespaces created later pick it up too (see
 * nf_register_hook_list()).  Callers hold rtnl_lock() — see
 * nf_register_hook().
 * -ENOENT from an individual netns is tolerated; any other error rolls
 * back the namespaces registered so far and is returned.
 */
static int _nf_register_hook(struct nf_hook_ops *reg)
{
	struct net *net, *last;
	int ret;

	for_each_net(net) {
		ret = nf_register_net_hook(net, reg);
		if (ret && ret != -ENOENT)
			goto rollback;
	}
	list_add_tail(&reg->list, &nf_hook_list);

	return 0;
rollback:
	/* Undo only the namespaces visited before the failing one. */
	last = net;
	for_each_net(net) {
		if (net == last)
			break;
		nf_unregister_net_hook(net, reg);
	}
	return ret;
}
| 263 | |
/* Public wrapper: register @reg in all namespaces under rtnl_lock(). */
int nf_register_hook(struct nf_hook_ops *reg)
{
	int err;

	rtnl_lock();
	err = _nf_register_hook(reg);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(nf_register_hook);
| 275 | |
/* Counterpart of _nf_register_hook(): drop @reg from the global
 * nf_hook_list, then unregister it from every namespace.  Callers hold
 * rtnl_lock() — see nf_unregister_hook().
 */
static void _nf_unregister_hook(struct nf_hook_ops *reg)
{
	struct net *net;

	list_del(&reg->list);
	for_each_net(net)
		nf_unregister_net_hook(net, reg);
}
| 284 | |
/* Public wrapper: unregister @reg from all namespaces under rtnl_lock(). */
void nf_unregister_hook(struct nf_hook_ops *reg)
{
	rtnl_lock();
	_nf_unregister_hook(reg);
	rtnl_unlock();
}
EXPORT_SYMBOL(nf_unregister_hook);
| 292 | |
Patrick McHardy | 972d1cb | 2006-04-06 14:09:12 -0700 | [diff] [blame] | 293 | int nf_register_hooks(struct nf_hook_ops *reg, unsigned int n) |
| 294 | { |
| 295 | unsigned int i; |
| 296 | int err = 0; |
| 297 | |
| 298 | for (i = 0; i < n; i++) { |
| 299 | err = nf_register_hook(®[i]); |
| 300 | if (err) |
| 301 | goto err; |
| 302 | } |
| 303 | return err; |
| 304 | |
| 305 | err: |
| 306 | if (i > 0) |
| 307 | nf_unregister_hooks(reg, i); |
| 308 | return err; |
| 309 | } |
| 310 | EXPORT_SYMBOL(nf_register_hooks); |
| 311 | |
Mahesh Bandewar | e8bffe0 | 2016-09-16 12:59:13 -0700 | [diff] [blame] | 312 | /* Caller MUST take rtnl_lock() */ |
| 313 | int _nf_register_hooks(struct nf_hook_ops *reg, unsigned int n) |
| 314 | { |
| 315 | unsigned int i; |
| 316 | int err = 0; |
| 317 | |
| 318 | for (i = 0; i < n; i++) { |
| 319 | err = _nf_register_hook(®[i]); |
| 320 | if (err) |
| 321 | goto err; |
| 322 | } |
| 323 | return err; |
| 324 | |
| 325 | err: |
| 326 | if (i > 0) |
| 327 | _nf_unregister_hooks(reg, i); |
| 328 | return err; |
| 329 | } |
| 330 | EXPORT_SYMBOL(_nf_register_hooks); |
| 331 | |
Patrick McHardy | 972d1cb | 2006-04-06 14:09:12 -0700 | [diff] [blame] | 332 | void nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n) |
| 333 | { |
Changli Gao | f68c530 | 2010-10-04 22:24:12 +0200 | [diff] [blame] | 334 | while (n-- > 0) |
| 335 | nf_unregister_hook(®[n]); |
Patrick McHardy | 972d1cb | 2006-04-06 14:09:12 -0700 | [diff] [blame] | 336 | } |
| 337 | EXPORT_SYMBOL(nf_unregister_hooks); |
| 338 | |
Mahesh Bandewar | e8bffe0 | 2016-09-16 12:59:13 -0700 | [diff] [blame] | 339 | /* Caller MUST take rtnl_lock */ |
| 340 | void _nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n) |
| 341 | { |
| 342 | while (n-- > 0) |
| 343 | _nf_unregister_hook(®[n]); |
| 344 | } |
| 345 | EXPORT_SYMBOL(_nf_unregister_hooks); |
| 346 | |
/* Run the hook chain starting at *entryp against @skb.
 *
 * *entryp is advanced in place as the chain is walked, so a caller that
 * re-enters after NF_QUEUE resumes from the saved position.  Hooks whose
 * priority is below state->thresh are skipped.  Returns the first verdict
 * other than NF_ACCEPT, or NF_ACCEPT when the end of the chain is reached.
 */
unsigned int nf_iterate(struct sk_buff *skb,
			struct nf_hook_state *state,
			struct nf_hook_entry **entryp)
{
	unsigned int verdict;

	/*
	 * The caller must not block between calls to this
	 * function because of risk of continuing from deleted element.
	 */
	while (*entryp) {
		if (state->thresh > (*entryp)->ops.priority) {
			/* below the resume threshold: skip this hook */
			*entryp = rcu_dereference((*entryp)->next);
			continue;
		}

		/* Optimization: we don't need to hold module
		   reference here, since function can't sleep. --RR */
repeat:
		verdict = (*entryp)->ops.hook((*entryp)->ops.priv, skb, state);
		if (verdict != NF_ACCEPT) {
#ifdef CONFIG_NETFILTER_DEBUG
			if (unlikely((verdict & NF_VERDICT_MASK)
							> NF_MAX_VERDICT)) {
				/* bogus verdict from the hook: log and
				 * pretend it accepted the packet
				 */
				NFDEBUG("Evil return from %p(%u).\n",
					(*entryp)->ops.hook, state->hook);
				*entryp = rcu_dereference((*entryp)->next);
				continue;
			}
#endif
			if (verdict != NF_REPEAT)
				return verdict;
			/* NF_REPEAT: invoke the same hook again */
			goto repeat;
		}
		*entryp = rcu_dereference((*entryp)->next);
	}
	return NF_ACCEPT;
}
| 385 | |
| 386 | |
/* Returns 1 if okfn() needs to be executed by the caller,
 * -EPERM (or the errno embedded in the drop verdict) for NF_DROP,
 * 0 otherwise.  Caller must hold rcu_read_lock. */
int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state)
{
	struct nf_hook_entry *entry;
	unsigned int verdict;
	int ret = 0;

	entry = rcu_dereference(state->hook_entries);
next_hook:
	verdict = nf_iterate(skb, state, &entry);
	if (verdict == NF_ACCEPT || verdict == NF_STOP) {
		ret = 1;
	} else if ((verdict & NF_VERDICT_MASK) == NF_DROP) {
		kfree_skb(skb);
		/* NF_DROP may carry an errno in its upper verdict bits */
		ret = NF_DROP_GETERR(verdict);
		if (ret == 0)
			ret = -EPERM;
	} else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) {
		int err;

		/* save the current chain position for nf_queue() */
		RCU_INIT_POINTER(state->hook_entries, entry);
		err = nf_queue(skb, state, verdict >> NF_VERDICT_QBITS);
		if (err < 0) {
			/* queueing failed; with the bypass flag set the
			 * packet continues down the chain instead of
			 * being dropped
			 */
			if (err == -ESRCH &&
			    (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
				goto next_hook;
			kfree_skb(skb);
		}
	}
	return ret;
}
EXPORT_SYMBOL(nf_hook_slow);
| 420 | |
| 421 | |
Herbert Xu | 37d4187 | 2007-10-14 00:39:18 -0700 | [diff] [blame] | 422 | int skb_make_writable(struct sk_buff *skb, unsigned int writable_len) |
Harald Welte | f6ebe77 | 2005-08-09 20:21:49 -0700 | [diff] [blame] | 423 | { |
Herbert Xu | 37d4187 | 2007-10-14 00:39:18 -0700 | [diff] [blame] | 424 | if (writable_len > skb->len) |
Harald Welte | f6ebe77 | 2005-08-09 20:21:49 -0700 | [diff] [blame] | 425 | return 0; |
| 426 | |
| 427 | /* Not exclusive use of packet? Must copy. */ |
Herbert Xu | 37d4187 | 2007-10-14 00:39:18 -0700 | [diff] [blame] | 428 | if (!skb_cloned(skb)) { |
| 429 | if (writable_len <= skb_headlen(skb)) |
| 430 | return 1; |
| 431 | } else if (skb_clone_writable(skb, writable_len)) |
| 432 | return 1; |
Harald Welte | f6ebe77 | 2005-08-09 20:21:49 -0700 | [diff] [blame] | 433 | |
Herbert Xu | 37d4187 | 2007-10-14 00:39:18 -0700 | [diff] [blame] | 434 | if (writable_len <= skb_headlen(skb)) |
| 435 | writable_len = 0; |
| 436 | else |
| 437 | writable_len -= skb_headlen(skb); |
Harald Welte | f6ebe77 | 2005-08-09 20:21:49 -0700 | [diff] [blame] | 438 | |
Herbert Xu | 37d4187 | 2007-10-14 00:39:18 -0700 | [diff] [blame] | 439 | return !!__pskb_pull_tail(skb, writable_len); |
Harald Welte | f6ebe77 | 2005-08-09 20:21:49 -0700 | [diff] [blame] | 440 | } |
| 441 | EXPORT_SYMBOL(skb_make_writable); |
| 442 | |
Pablo Neira Ayuso | b7bd180 | 2015-09-30 22:53:44 +0100 | [diff] [blame] | 443 | /* This needs to be compiled in any case to avoid dependencies between the |
| 444 | * nfnetlink_queue code and nf_conntrack. |
| 445 | */ |
Ken-ichirou MATSUZAWA | a4b4766 | 2015-10-05 11:47:13 +0900 | [diff] [blame] | 446 | struct nfnl_ct_hook __rcu *nfnl_ct_hook __read_mostly; |
| 447 | EXPORT_SYMBOL_GPL(nfnl_ct_hook); |
Pablo Neira Ayuso | b7bd180 | 2015-09-30 22:53:44 +0100 | [diff] [blame] | 448 | |
Igor Maravić | c0cd115 | 2011-12-12 02:58:24 +0000 | [diff] [blame] | 449 | #if IS_ENABLED(CONFIG_NF_CONNTRACK) |
Harald Welte | f6ebe77 | 2005-08-09 20:21:49 -0700 | [diff] [blame] | 450 | /* This does not belong here, but locally generated errors need it if connection |
| 451 | tracking in use: without this, connection may not be in hash table, and hence |
| 452 | manufactured ICMP or RST packets will not be associated with it. */ |
Patrick McHardy | 312a0c16 | 2013-07-28 22:54:08 +0200 | [diff] [blame] | 453 | void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) |
| 454 | __rcu __read_mostly; |
Harald Welte | f6ebe77 | 2005-08-09 20:21:49 -0700 | [diff] [blame] | 455 | EXPORT_SYMBOL(ip_ct_attach); |
| 456 | |
Patrick McHardy | 312a0c16 | 2013-07-28 22:54:08 +0200 | [diff] [blame] | 457 | void nf_ct_attach(struct sk_buff *new, const struct sk_buff *skb) |
Harald Welte | f6ebe77 | 2005-08-09 20:21:49 -0700 | [diff] [blame] | 458 | { |
Patrick McHardy | 312a0c16 | 2013-07-28 22:54:08 +0200 | [diff] [blame] | 459 | void (*attach)(struct sk_buff *, const struct sk_buff *); |
Harald Welte | f6ebe77 | 2005-08-09 20:21:49 -0700 | [diff] [blame] | 460 | |
Patrick McHardy | c3a47ab | 2007-02-12 11:09:19 -0800 | [diff] [blame] | 461 | if (skb->nfct) { |
| 462 | rcu_read_lock(); |
| 463 | attach = rcu_dereference(ip_ct_attach); |
| 464 | if (attach) |
| 465 | attach(new, skb); |
| 466 | rcu_read_unlock(); |
Harald Welte | f6ebe77 | 2005-08-09 20:21:49 -0700 | [diff] [blame] | 467 | } |
| 468 | } |
| 469 | EXPORT_SYMBOL(nf_ct_attach); |
Yasuyuki Kozakai | de6e05c | 2007-03-23 11:17:27 -0700 | [diff] [blame] | 470 | |
Eric Dumazet | 0e60ebe | 2010-11-15 18:17:21 +0100 | [diff] [blame] | 471 | void (*nf_ct_destroy)(struct nf_conntrack *) __rcu __read_mostly; |
Yasuyuki Kozakai | de6e05c | 2007-03-23 11:17:27 -0700 | [diff] [blame] | 472 | EXPORT_SYMBOL(nf_ct_destroy); |
| 473 | |
/* Release a conntrack reference via the destructor registered in
 * nf_ct_destroy.  BUG if none is registered — a live nf_conntrack
 * reference is presumed to imply the owning module is loaded; confirm
 * against the conntrack module's registration/unregistration order.
 */
void nf_conntrack_destroy(struct nf_conntrack *nfct)
{
	void (*destroy)(struct nf_conntrack *);

	rcu_read_lock();
	destroy = rcu_dereference(nf_ct_destroy);
	BUG_ON(destroy == NULL);
	destroy(nfct);
	rcu_read_unlock();
}
EXPORT_SYMBOL(nf_conntrack_destroy);
Pablo Neira Ayuso | 9cb0176 | 2012-06-07 12:13:39 +0200 | [diff] [blame] | 485 | |
Daniel Borkmann | 62da986 | 2015-09-03 01:26:07 +0200 | [diff] [blame] | 486 | /* Built-in default zone used e.g. by modules. */ |
| 487 | const struct nf_conntrack_zone nf_ct_zone_dflt = { |
| 488 | .id = NF_CT_DEFAULT_ZONE_ID, |
| 489 | .dir = NF_CT_DEFAULT_ZONE_DIR, |
| 490 | }; |
| 491 | EXPORT_SYMBOL_GPL(nf_ct_zone_dflt); |
Yasuyuki Kozakai | de6e05c | 2007-03-23 11:17:27 -0700 | [diff] [blame] | 492 | #endif /* CONFIG_NF_CONNTRACK */ |
Harald Welte | f6ebe77 | 2005-08-09 20:21:49 -0700 | [diff] [blame] | 493 | |
Patrick McHardy | c7232c9 | 2012-08-26 19:14:06 +0200 | [diff] [blame] | 494 | #ifdef CONFIG_NF_NAT_NEEDED |
| 495 | void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *); |
| 496 | EXPORT_SYMBOL(nf_nat_decode_session_hook); |
| 497 | #endif |
| 498 | |
/* Replay every globally registered hook (nf_hook_list) into @net; called
 * from the pernet init path for newly created namespaces.
 * -ENOENT from an individual hook is tolerated; any other failure unwinds
 * the hooks registered so far, in reverse, and is returned.
 */
static int nf_register_hook_list(struct net *net)
{
	struct nf_hook_ops *elem;
	int ret;

	rtnl_lock();
	list_for_each_entry(elem, &nf_hook_list, list) {
		ret = nf_register_net_hook(net, elem);
		if (ret && ret != -ENOENT)
			goto out_undo;
	}
	rtnl_unlock();
	return 0;

out_undo:
	/* walk back from the failing element to the list head */
	list_for_each_entry_continue_reverse(elem, &nf_hook_list, list)
		nf_unregister_net_hook(net, elem);
	rtnl_unlock();
	return ret;
}
| 519 | |
| 520 | static void nf_unregister_hook_list(struct net *net) |
| 521 | { |
| 522 | struct nf_hook_ops *elem; |
| 523 | |
| 524 | rtnl_lock(); |
| 525 | list_for_each_entry(elem, &nf_hook_list, list) |
| 526 | nf_unregister_net_hook(net, elem); |
| 527 | rtnl_unlock(); |
| 528 | } |
| 529 | |
/* Per-netns initialization: clear all hook chain heads, create the
 * /proc/net/netfilter directory, and replay globally registered hooks.
 * Returns 0 on success or a negative errno.
 */
static int __net_init netfilter_net_init(struct net *net)
{
	int i, h, ret;

	/* start with empty hook chains for every (pf, hooknum) slot */
	for (i = 0; i < ARRAY_SIZE(net->nf.hooks); i++) {
		for (h = 0; h < NF_MAX_HOOKS; h++)
			RCU_INIT_POINTER(net->nf.hooks[i][h], NULL);
	}

#ifdef CONFIG_PROC_FS
	net->nf.proc_netfilter = proc_net_mkdir(net, "netfilter",
						net->proc_net);
	if (!net->nf.proc_netfilter) {
		/* NOTE(review): only non-init namespaces log the failure
		 * here — presumably init_net failure is surfaced by the
		 * boot-time caller; confirm.
		 */
		if (!net_eq(net, &init_net))
			pr_err("cannot create netfilter proc entry");

		return -ENOMEM;
	}
#endif
	ret = nf_register_hook_list(net);
	if (ret)
		remove_proc_entry("netfilter", net->proc_net);

	return ret;
}
| 555 | |
/* Per-netns teardown: drop all globally registered hooks from @net and
 * remove its /proc/net/netfilter directory.
 */
static void __net_exit netfilter_net_exit(struct net *net)
{
	nf_unregister_hook_list(net);
	remove_proc_entry("netfilter", net->proc_net);
}
| 561 | |
| 562 | static struct pernet_operations netfilter_net_ops = { |
| 563 | .init = netfilter_net_init, |
| 564 | .exit = netfilter_net_exit, |
| 565 | }; |
| 566 | |
Pablo Neira Ayuso | 6d11cfd | 2013-05-22 22:42:36 +0000 | [diff] [blame] | 567 | int __init netfilter_init(void) |
Harald Welte | f6ebe77 | 2005-08-09 20:21:49 -0700 | [diff] [blame] | 568 | { |
Eric W. Biederman | 085db2c | 2015-07-10 18:15:06 -0500 | [diff] [blame] | 569 | int ret; |
Harald Welte | f6ebe77 | 2005-08-09 20:21:49 -0700 | [diff] [blame] | 570 | |
Pablo Neira Ayuso | 6d11cfd | 2013-05-22 22:42:36 +0000 | [diff] [blame] | 571 | ret = register_pernet_subsys(&netfilter_net_ops); |
| 572 | if (ret < 0) |
| 573 | goto err; |
Harald Welte | f6ebe77 | 2005-08-09 20:21:49 -0700 | [diff] [blame] | 574 | |
Pablo Neira Ayuso | 6d11cfd | 2013-05-22 22:42:36 +0000 | [diff] [blame] | 575 | ret = netfilter_log_init(); |
| 576 | if (ret < 0) |
| 577 | goto err_pernet; |
| 578 | |
| 579 | return 0; |
| 580 | err_pernet: |
| 581 | unregister_pernet_subsys(&netfilter_net_ops); |
| 582 | err: |
| 583 | return ret; |
Harald Welte | f6ebe77 | 2005-08-09 20:21:49 -0700 | [diff] [blame] | 584 | } |