YOSHIFUJI Hideaki | 601e68e | 2007-02-12 11:15:49 -0800 | [diff] [blame] | 1 | /* netfilter.c: look after the filters for various protocols. |
Harald Welte | f6ebe77 | 2005-08-09 20:21:49 -0700 | [diff] [blame] | 2 | * Heavily influenced by the old firewall.c by David Bonn and Alan Cox. |
| 3 | * |
| 4 | * Thanks to Rob `CmdrTaco' Malda for not influencing this code in any |
| 5 | * way. |
| 6 | * |
| 7 | * Rusty Russell (C)2000 -- This code is GPL. |
Harald Welte | f6ebe77 | 2005-08-09 20:21:49 -0700 | [diff] [blame] | 8 | */ |
Harald Welte | f6ebe77 | 2005-08-09 20:21:49 -0700 | [diff] [blame] | 9 | #include <linux/kernel.h> |
| 10 | #include <linux/netfilter.h> |
| 11 | #include <net/protocol.h> |
| 12 | #include <linux/init.h> |
| 13 | #include <linux/skbuff.h> |
| 14 | #include <linux/wait.h> |
| 15 | #include <linux/module.h> |
| 16 | #include <linux/interrupt.h> |
| 17 | #include <linux/if.h> |
| 18 | #include <linux/netdevice.h> |
| 19 | #include <linux/inetdevice.h> |
| 20 | #include <linux/proc_fs.h> |
Patrick McHardy | d486dd1 | 2007-02-12 11:09:55 -0800 | [diff] [blame] | 21 | #include <linux/mutex.h> |
Eric W. Biederman | 457c4cb | 2007-09-12 12:01:34 +0200 | [diff] [blame] | 22 | #include <net/net_namespace.h> |
Harald Welte | f6ebe77 | 2005-08-09 20:21:49 -0700 | [diff] [blame] | 23 | #include <net/sock.h> |
| 24 | |
| 25 | #include "nf_internals.h" |
| 26 | |
Patrick McHardy | d486dd1 | 2007-02-12 11:09:55 -0800 | [diff] [blame] | 27 | static DEFINE_MUTEX(afinfo_mutex); |
Patrick McHardy | bce8032 | 2006-04-06 14:18:09 -0700 | [diff] [blame] | 28 | |
Martin Josefsson | e2b7606 | 2006-11-29 02:35:04 +0100 | [diff] [blame] | 29 | struct nf_afinfo *nf_afinfo[NPROTO] __read_mostly; |
Patrick McHardy | bce8032 | 2006-04-06 14:18:09 -0700 | [diff] [blame] | 30 | EXPORT_SYMBOL(nf_afinfo); |
| 31 | |
| 32 | int nf_register_afinfo(struct nf_afinfo *afinfo) |
| 33 | { |
Patrick McHardy | d486dd1 | 2007-02-12 11:09:55 -0800 | [diff] [blame] | 34 | int err; |
| 35 | |
| 36 | err = mutex_lock_interruptible(&afinfo_mutex); |
| 37 | if (err < 0) |
| 38 | return err; |
Patrick McHardy | bce8032 | 2006-04-06 14:18:09 -0700 | [diff] [blame] | 39 | rcu_assign_pointer(nf_afinfo[afinfo->family], afinfo); |
Patrick McHardy | d486dd1 | 2007-02-12 11:09:55 -0800 | [diff] [blame] | 40 | mutex_unlock(&afinfo_mutex); |
Patrick McHardy | bce8032 | 2006-04-06 14:18:09 -0700 | [diff] [blame] | 41 | return 0; |
| 42 | } |
| 43 | EXPORT_SYMBOL_GPL(nf_register_afinfo); |
| 44 | |
/* Withdraw a previously registered nf_afinfo entry.
 * Clears the slot under afinfo_mutex, then waits in synchronize_rcu()
 * so that every RCU reader that may still hold the old pointer has
 * finished before the caller is allowed to free *afinfo.
 */
void nf_unregister_afinfo(struct nf_afinfo *afinfo)
{
	mutex_lock(&afinfo_mutex);
	rcu_assign_pointer(nf_afinfo[afinfo->family], NULL);
	mutex_unlock(&afinfo_mutex);
	/* Must follow the unlock: blocks until all current RCU read-side
	 * critical sections complete. */
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(nf_unregister_afinfo);
| 53 | |
Harald Welte | f6ebe77 | 2005-08-09 20:21:49 -0700 | [diff] [blame] | 54 | /* In this code, we can be waiting indefinitely for userspace to |
| 55 | * service a packet if a hook returns NF_QUEUE. We could keep a count |
| 56 | * of skbuffs queued for userspace, and not deregister a hook unless |
| 57 | * this is zero, but that sucks. Now, we simply check when the |
| 58 | * packets come back: if the hook is gone, the packet is discarded. */ |
Martin Josefsson | e2b7606 | 2006-11-29 02:35:04 +0100 | [diff] [blame] | 59 | struct list_head nf_hooks[NPROTO][NF_MAX_HOOKS] __read_mostly; |
Harald Welte | f6ebe77 | 2005-08-09 20:21:49 -0700 | [diff] [blame] | 60 | EXPORT_SYMBOL(nf_hooks); |
Patrick McHardy | fd706d6 | 2007-02-12 11:10:14 -0800 | [diff] [blame] | 61 | static DEFINE_MUTEX(nf_hook_mutex); |
Harald Welte | f6ebe77 | 2005-08-09 20:21:49 -0700 | [diff] [blame] | 62 | |
/* Insert @reg into the hook chain for (reg->pf, reg->hooknum), keeping
 * the chain sorted by ascending priority.  May sleep; returns 0 or the
 * -errno from an interrupted mutex acquisition.
 */
int nf_register_hook(struct nf_hook_ops *reg)
{
	struct nf_hook_ops *elem;
	int err;

	err = mutex_lock_interruptible(&nf_hook_mutex);
	if (err < 0)
		return err;
	/* Find the first entry with a strictly higher priority; the new
	 * hook goes immediately before it.  If the loop runs to
	 * completion, elem refers to the list head wrapper, so
	 * elem->list.prev is the tail and the hook is appended. */
	list_for_each_entry(elem, &nf_hooks[reg->pf][reg->hooknum], list) {
		if (reg->priority < elem->priority)
			break;
	}
	/* RCU publication: readers may traverse the chain locklessly. */
	list_add_rcu(&reg->list, elem->list.prev);
	mutex_unlock(&nf_hook_mutex);
	return 0;
}
EXPORT_SYMBOL(nf_register_hook);
| 80 | |
/* Remove @reg from its hook chain.  Sleeps in synchronize_net() so all
 * softirq/RCU readers still walking the chain are done before the
 * caller may free or reuse @reg.
 */
void nf_unregister_hook(struct nf_hook_ops *reg)
{
	mutex_lock(&nf_hook_mutex);
	list_del_rcu(&reg->list);
	mutex_unlock(&nf_hook_mutex);

	/* Grace period: packets already inside nf_iterate() may still
	 * reference the deleted element until this returns. */
	synchronize_net();
}
EXPORT_SYMBOL(nf_unregister_hook);
| 90 | |
Patrick McHardy | 972d1cb | 2006-04-06 14:09:12 -0700 | [diff] [blame] | 91 | int nf_register_hooks(struct nf_hook_ops *reg, unsigned int n) |
| 92 | { |
| 93 | unsigned int i; |
| 94 | int err = 0; |
| 95 | |
| 96 | for (i = 0; i < n; i++) { |
| 97 | err = nf_register_hook(®[i]); |
| 98 | if (err) |
| 99 | goto err; |
| 100 | } |
| 101 | return err; |
| 102 | |
| 103 | err: |
| 104 | if (i > 0) |
| 105 | nf_unregister_hooks(reg, i); |
| 106 | return err; |
| 107 | } |
| 108 | EXPORT_SYMBOL(nf_register_hooks); |
| 109 | |
| 110 | void nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n) |
| 111 | { |
| 112 | unsigned int i; |
| 113 | |
| 114 | for (i = 0; i < n; i++) |
| 115 | nf_unregister_hook(®[i]); |
| 116 | } |
| 117 | EXPORT_SYMBOL(nf_unregister_hooks); |
| 118 | |
/* Walk the hook chain @head starting after *i, invoking each hook whose
 * priority is >= @hook_thresh.  Returns the first verdict other than
 * NF_ACCEPT (NF_REPEAT re-runs the same hook instead of returning), or
 * NF_ACCEPT when the end of the chain is reached.  *i is updated so the
 * caller can resume traversal later (used after queued packets are
 * reinjected).
 */
unsigned int nf_iterate(struct list_head *head,
			struct sk_buff *skb,
			int hook,
			const struct net_device *indev,
			const struct net_device *outdev,
			struct list_head **i,
			int (*okfn)(struct sk_buff *),
			int hook_thresh)
{
	unsigned int verdict;

	/*
	 * The caller must not block between calls to this
	 * function because of risk of continuing from deleted element.
	 */
	list_for_each_continue_rcu(*i, head) {
		/* struct list_head is the first member of nf_hook_ops,
		 * so the cast recovers the containing ops structure. */
		struct nf_hook_ops *elem = (struct nf_hook_ops *)*i;

		/* Skip hooks below the caller's priority threshold. */
		if (hook_thresh > elem->priority)
			continue;

		/* Optimization: we don't need to hold module
		   reference here, since function can't sleep. --RR */
		verdict = elem->hook(hook, skb, indev, outdev, okfn);
		if (verdict != NF_ACCEPT) {
#ifdef CONFIG_NETFILTER_DEBUG
			/* Out-of-range verdict: log and pretend ACCEPT. */
			if (unlikely((verdict & NF_VERDICT_MASK)
							> NF_MAX_VERDICT)) {
				NFDEBUG("Evil return from %p(%u).\n",
					elem->hook, hook);
				continue;
			}
#endif
			if (verdict != NF_REPEAT)
				return verdict;
			/* NF_REPEAT: step the cursor back so the next
			 * loop iteration calls the same hook again. */
			*i = (*i)->prev;
		}
	}
	return NF_ACCEPT;
}
| 159 | |
| 160 | |
/* Returns 1 if okfn() needs to be executed by the caller,
 * -EPERM for NF_DROP, 0 otherwise. */
int nf_hook_slow(int pf, unsigned int hook, struct sk_buff *skb,
		 struct net_device *indev,
		 struct net_device *outdev,
		 int (*okfn)(struct sk_buff *),
		 int hook_thresh)
{
	struct list_head *elem;
	unsigned int verdict;
	int ret = 0;

	/* We may already have this, but read-locks nest anyway */
	rcu_read_lock();

	/* Start traversal at the chain head; nf_iterate() advances elem. */
	elem = &nf_hooks[pf][hook];
next_hook:
	verdict = nf_iterate(&nf_hooks[pf][hook], skb, hook, indev,
			     outdev, &elem, okfn, hook_thresh);
	if (verdict == NF_ACCEPT || verdict == NF_STOP) {
		/* Chain accepted the packet; caller should invoke okfn(). */
		ret = 1;
		goto unlock;
	} else if (verdict == NF_DROP) {
		kfree_skb(skb);
		ret = -EPERM;
	} else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) {
		NFDEBUG("nf_hook: Verdict = QUEUE.\n");
		/* High verdict bits carry the queue number.  If queueing
		 * fails (e.g. no handler), resume the chain at the next
		 * hook instead of dropping; ret stays 0 when queued. */
		if (!nf_queue(skb, elem, pf, hook, indev, outdev, okfn,
			      verdict >> NF_VERDICT_BITS))
			goto next_hook;
	}
unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(nf_hook_slow);
| 197 | |
| 198 | |
Herbert Xu | 37d4187 | 2007-10-14 00:39:18 -0700 | [diff] [blame] | 199 | int skb_make_writable(struct sk_buff *skb, unsigned int writable_len) |
Harald Welte | f6ebe77 | 2005-08-09 20:21:49 -0700 | [diff] [blame] | 200 | { |
Herbert Xu | 37d4187 | 2007-10-14 00:39:18 -0700 | [diff] [blame] | 201 | if (writable_len > skb->len) |
Harald Welte | f6ebe77 | 2005-08-09 20:21:49 -0700 | [diff] [blame] | 202 | return 0; |
| 203 | |
| 204 | /* Not exclusive use of packet? Must copy. */ |
Herbert Xu | 37d4187 | 2007-10-14 00:39:18 -0700 | [diff] [blame] | 205 | if (!skb_cloned(skb)) { |
| 206 | if (writable_len <= skb_headlen(skb)) |
| 207 | return 1; |
| 208 | } else if (skb_clone_writable(skb, writable_len)) |
| 209 | return 1; |
Harald Welte | f6ebe77 | 2005-08-09 20:21:49 -0700 | [diff] [blame] | 210 | |
Herbert Xu | 37d4187 | 2007-10-14 00:39:18 -0700 | [diff] [blame] | 211 | if (writable_len <= skb_headlen(skb)) |
| 212 | writable_len = 0; |
| 213 | else |
| 214 | writable_len -= skb_headlen(skb); |
Harald Welte | f6ebe77 | 2005-08-09 20:21:49 -0700 | [diff] [blame] | 215 | |
Herbert Xu | 37d4187 | 2007-10-14 00:39:18 -0700 | [diff] [blame] | 216 | return !!__pskb_pull_tail(skb, writable_len); |
Harald Welte | f6ebe77 | 2005-08-09 20:21:49 -0700 | [diff] [blame] | 217 | } |
| 218 | EXPORT_SYMBOL(skb_make_writable); |
| 219 | |
Yasuyuki Kozakai | 5f79e0f | 2007-03-23 11:17:07 -0700 | [diff] [blame] | 220 | #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) |
/* This does not belong here, but locally generated errors need it if connection
   tracking in use: without this, connection may not be in hash table, and hence
   manufactured ICMP or RST packets will not be associated with it. */
/* Runtime-installed callback (NULL until registered); dereferenced
 * under RCU by nf_ct_attach(). */
void (*ip_ct_attach)(struct sk_buff *, struct sk_buff *);
EXPORT_SYMBOL(ip_ct_attach);
| 226 | |
| 227 | void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) |
| 228 | { |
| 229 | void (*attach)(struct sk_buff *, struct sk_buff *); |
| 230 | |
Patrick McHardy | c3a47ab | 2007-02-12 11:09:19 -0800 | [diff] [blame] | 231 | if (skb->nfct) { |
| 232 | rcu_read_lock(); |
| 233 | attach = rcu_dereference(ip_ct_attach); |
| 234 | if (attach) |
| 235 | attach(new, skb); |
| 236 | rcu_read_unlock(); |
Harald Welte | f6ebe77 | 2005-08-09 20:21:49 -0700 | [diff] [blame] | 237 | } |
| 238 | } |
| 239 | EXPORT_SYMBOL(nf_ct_attach); |
Yasuyuki Kozakai | de6e05c | 2007-03-23 11:17:27 -0700 | [diff] [blame] | 240 | |
/* Runtime-installed destructor for conntrack references (NULL until
 * registered); dereferenced under RCU below. */
void (*nf_ct_destroy)(struct nf_conntrack *);
EXPORT_SYMBOL(nf_ct_destroy);

/* Release the conntrack entry @nfct via the registered destructor. */
void nf_conntrack_destroy(struct nf_conntrack *nfct)
{
	void (*destroy)(struct nf_conntrack *);

	/* RCU pins the function pointer against unregistration while we
	 * call through it. */
	rcu_read_lock();
	destroy = rcu_dereference(nf_ct_destroy);
	/* A live conntrack reference with no destructor registered is a
	 * bug, not a recoverable condition. */
	BUG_ON(destroy == NULL);
	destroy(nfct);
	rcu_read_unlock();
}
EXPORT_SYMBOL(nf_conntrack_destroy);
| 255 | #endif /* CONFIG_NF_CONNTRACK */ |
Harald Welte | f6ebe77 | 2005-08-09 20:21:49 -0700 | [diff] [blame] | 256 | |
| 257 | #ifdef CONFIG_PROC_FS |
| 258 | struct proc_dir_entry *proc_net_netfilter; |
| 259 | EXPORT_SYMBOL(proc_net_netfilter); |
| 260 | #endif |
| 261 | |
| 262 | void __init netfilter_init(void) |
| 263 | { |
| 264 | int i, h; |
| 265 | for (i = 0; i < NPROTO; i++) { |
| 266 | for (h = 0; h < NF_MAX_HOOKS; h++) |
| 267 | INIT_LIST_HEAD(&nf_hooks[i][h]); |
| 268 | } |
| 269 | |
| 270 | #ifdef CONFIG_PROC_FS |
Eric W. Biederman | 457c4cb | 2007-09-12 12:01:34 +0200 | [diff] [blame] | 271 | proc_net_netfilter = proc_mkdir("netfilter", init_net.proc_net); |
Harald Welte | f6ebe77 | 2005-08-09 20:21:49 -0700 | [diff] [blame] | 272 | if (!proc_net_netfilter) |
| 273 | panic("cannot create netfilter proc entry"); |
| 274 | #endif |
| 275 | |
| 276 | if (netfilter_queue_init() < 0) |
| 277 | panic("cannot initialize nf_queue"); |
| 278 | if (netfilter_log_init() < 0) |
| 279 | panic("cannot initialize nf_log"); |
| 280 | } |