/* Connection state tracking for netfilter.  This is separated from,
   but required by, the NAT layer; it can also be used by an iptables
   extension. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * 23 Apr 2001: Harald Welte <laforge@gnumonks.org>
 *	- new API and handling of conntrack/nat helpers
 *	- now capable of multiple expectations for one master
 * 16 Jul 2002: Harald Welte <laforge@gnumonks.org>
 *	- add usage/reference counts to ip_conntrack_expect
 *	- export ip_conntrack[_expect]_{find_get,put} functions
 * 16 Dec 2003: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
 *	- generalize L3 protocol dependent part.
 * 23 Mar 2004: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
 *	- add support for various sizes of conntrack structures.
 * 26 Jan 2006: Harald Welte <laforge@netfilter.org>
 *	- restructure nf_conn (introduce nf_conn_help)
 *	- redesign 'features' as they were originally intended
 * 26 Feb 2006: Pablo Neira Ayuso <pablo@eurodev.net>
 *	- add support for L3 protocol module load on demand.
 *
 * Derived from net/ipv4/netfilter/ip_conntrack_core.c
 */

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/socket.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_core.h>

#define NF_CONNTRACK_VERSION	"0.5.0"

#if 0
#define DEBUGP printk
#else
#define DEBUGP(format, args...)
#endif

DEFINE_RWLOCK(nf_conntrack_lock);

/* nf_conntrack_standalone needs this */
atomic_t nf_conntrack_count = ATOMIC_INIT(0);
EXPORT_SYMBOL_GPL(nf_conntrack_count);

void (*nf_conntrack_destroyed)(struct nf_conn *conntrack) = NULL;
unsigned int nf_conntrack_htable_size __read_mostly;
int nf_conntrack_max __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_max);
struct list_head *nf_conntrack_hash __read_mostly;
struct nf_conn nf_conntrack_untracked __read_mostly;
unsigned int nf_ct_log_invalid __read_mostly;
LIST_HEAD(unconfirmed);
static int nf_conntrack_vmalloc __read_mostly;

static unsigned int nf_conntrack_next_id;

DEFINE_PER_CPU(struct ip_conntrack_stat, nf_conntrack_stat);
EXPORT_PER_CPU_SYMBOL(nf_conntrack_stat);

/*
 * This scheme offers various sizes of "struct nf_conn" depending on
 * features (helper, nat, ...)
 */

#define NF_CT_FEATURES_NAMELEN	256
static struct {
	/* name of slab cache. printed in /proc/slabinfo */
	char *name;

	/* size of slab cache */
	size_t size;

	/* slab cache pointer */
	kmem_cache_t *cachep;

	/* allocated slab cache + modules which use this slab cache */
	int use;

} nf_ct_cache[NF_CT_F_NUM];

/* protect members of nf_ct_cache except "use" */
DEFINE_RWLOCK(nf_ct_cache_lock);

/* This avoids calling kmem_cache_create() with the same name simultaneously */
static DEFINE_MUTEX(nf_ct_cache_mutex);

static int nf_conntrack_hash_rnd_initted;
static unsigned int nf_conntrack_hash_rnd;

static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
				  unsigned int size, unsigned int rnd)
{
	unsigned int a, b;
	a = jhash((void *)tuple->src.u3.all, sizeof(tuple->src.u3.all),
		  ((tuple->src.l3num) << 16) | tuple->dst.protonum);
	b = jhash((void *)tuple->dst.u3.all, sizeof(tuple->dst.u3.all),
		  (tuple->src.u.all << 16) | tuple->dst.u.all);

	return jhash_2words(a, b, rnd) % size;
}

static inline u_int32_t hash_conntrack(const struct nf_conntrack_tuple *tuple)
{
	return __hash_conntrack(tuple, nf_conntrack_htable_size,
				nf_conntrack_hash_rnd);
}

int nf_conntrack_register_cache(u_int32_t features, const char *name,
				size_t size)
{
	int ret = 0;
	char *cache_name;
	kmem_cache_t *cachep;

	DEBUGP("nf_conntrack_register_cache: features=0x%x, name=%s, size=%d\n",
	       features, name, size);

	if (features < NF_CT_F_BASIC || features >= NF_CT_F_NUM) {
		DEBUGP("nf_conntrack_register_cache: invalid features: 0x%x\n",
		       features);
		return -EINVAL;
	}

	mutex_lock(&nf_ct_cache_mutex);

	write_lock_bh(&nf_ct_cache_lock);
	/* e.g: multiple helpers are loaded */
	if (nf_ct_cache[features].use > 0) {
		DEBUGP("nf_conntrack_register_cache: already registered.\n");
		if ((!strncmp(nf_ct_cache[features].name, name,
			      NF_CT_FEATURES_NAMELEN))
		    && nf_ct_cache[features].size == size) {
			DEBUGP("nf_conntrack_register_cache: reusing.\n");
			nf_ct_cache[features].use++;
			ret = 0;
		} else
			ret = -EBUSY;

		write_unlock_bh(&nf_ct_cache_lock);
		mutex_unlock(&nf_ct_cache_mutex);
		return ret;
	}
	write_unlock_bh(&nf_ct_cache_lock);

	/*
	 * The memory space for the name of the slab cache must stay alive
	 * until the cache is destroyed.
	 */
	cache_name = kmalloc(sizeof(char) * NF_CT_FEATURES_NAMELEN, GFP_ATOMIC);
	if (cache_name == NULL) {
		DEBUGP("nf_conntrack_register_cache: can't alloc cache_name\n");
		ret = -ENOMEM;
		goto out_up_mutex;
	}

	if (strlcpy(cache_name, name, NF_CT_FEATURES_NAMELEN)
	    >= NF_CT_FEATURES_NAMELEN) {
		printk("nf_conntrack_register_cache: name too long\n");
		ret = -EINVAL;
		goto out_free_name;
	}

	cachep = kmem_cache_create(cache_name, size, 0, 0,
				   NULL, NULL);
	if (!cachep) {
		printk("nf_conntrack_register_cache: Can't create slab cache "
		       "for the features = 0x%x\n", features);
		ret = -ENOMEM;
		goto out_free_name;
	}

	write_lock_bh(&nf_ct_cache_lock);
	nf_ct_cache[features].use = 1;
	nf_ct_cache[features].size = size;
	nf_ct_cache[features].cachep = cachep;
	nf_ct_cache[features].name = cache_name;
	write_unlock_bh(&nf_ct_cache_lock);

	goto out_up_mutex;

out_free_name:
	kfree(cache_name);
out_up_mutex:
	mutex_unlock(&nf_ct_cache_mutex);
	return ret;
}

/* FIXME: Currently, only nf_conntrack_cleanup() can call this function. */
void nf_conntrack_unregister_cache(u_int32_t features)
{
	kmem_cache_t *cachep;
	char *name;

	/*
	 * This ensures that kmem_cache_create() isn't called before the
	 * slab cache is destroyed.
	 */
	DEBUGP("nf_conntrack_unregister_cache: 0x%04x\n", features);
	mutex_lock(&nf_ct_cache_mutex);

	write_lock_bh(&nf_ct_cache_lock);
	if (--nf_ct_cache[features].use > 0) {
		write_unlock_bh(&nf_ct_cache_lock);
		mutex_unlock(&nf_ct_cache_mutex);
		return;
	}
	cachep = nf_ct_cache[features].cachep;
	name = nf_ct_cache[features].name;
	nf_ct_cache[features].cachep = NULL;
	nf_ct_cache[features].name = NULL;
	nf_ct_cache[features].size = 0;
	write_unlock_bh(&nf_ct_cache_lock);

	synchronize_net();

	kmem_cache_destroy(cachep);
	kfree(name);

	mutex_unlock(&nf_ct_cache_mutex);
}

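/*
 * Illustrative usage (a sketch, not part of the original file): a module
 * that needs extra per-connection data registers a cache for its feature
 * bit before use and drops the reference on unload.  The init/exit
 * function names and the size calculation below are hypothetical.
 */
#if 0
static int __init my_module_init(void)
{
	/* one slab for all conntracks carrying helper data */
	return nf_conntrack_register_cache(NF_CT_F_HELP, "nf_conntrack:help",
					   sizeof(struct nf_conn)
					   + sizeof(struct nf_conn_help));
}

static void __exit my_module_exit(void)
{
	nf_conntrack_unregister_cache(NF_CT_F_HELP);
}
#endif
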
int
nf_ct_get_tuple(const struct sk_buff *skb,
		unsigned int nhoff,
		unsigned int dataoff,
		u_int16_t l3num,
		u_int8_t protonum,
		struct nf_conntrack_tuple *tuple,
		const struct nf_conntrack_l3proto *l3proto,
		const struct nf_conntrack_l4proto *l4proto)
{
	NF_CT_TUPLE_U_BLANK(tuple);

	tuple->src.l3num = l3num;
	if (l3proto->pkt_to_tuple(skb, nhoff, tuple) == 0)
		return 0;

	tuple->dst.protonum = protonum;
	tuple->dst.dir = IP_CT_DIR_ORIGINAL;

	return l4proto->pkt_to_tuple(skb, dataoff, tuple);
}

int
nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
		   const struct nf_conntrack_tuple *orig,
		   const struct nf_conntrack_l3proto *l3proto,
		   const struct nf_conntrack_l4proto *l4proto)
{
	NF_CT_TUPLE_U_BLANK(inverse);

	inverse->src.l3num = orig->src.l3num;
	if (l3proto->invert_tuple(inverse, orig) == 0)
		return 0;

	inverse->dst.dir = !orig->dst.dir;

	inverse->dst.protonum = orig->dst.protonum;
	return l4proto->invert_tuple(inverse, orig);
}

static void
clean_from_lists(struct nf_conn *ct)
{
	DEBUGP("clean_from_lists(%p)\n", ct);
	list_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list);
	list_del(&ct->tuplehash[IP_CT_DIR_REPLY].list);

	/* Destroy all pending expectations */
	nf_ct_remove_expectations(ct);
}

static void
destroy_conntrack(struct nf_conntrack *nfct)
{
	struct nf_conn *ct = (struct nf_conn *)nfct;
	struct nf_conntrack_l3proto *l3proto;
	struct nf_conntrack_l4proto *l4proto;

	DEBUGP("destroy_conntrack(%p)\n", ct);
	NF_CT_ASSERT(atomic_read(&nfct->use) == 0);
	NF_CT_ASSERT(!timer_pending(&ct->timeout));

	nf_conntrack_event(IPCT_DESTROY, ct);
	set_bit(IPS_DYING_BIT, &ct->status);

	/* To make sure we don't get any weird locking issues here:
	 * destroy_conntrack() MUST NOT be called with a write lock
	 * to nf_conntrack_lock!!! -HW */
	l3proto = __nf_ct_l3proto_find(ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.l3num);
	if (l3proto && l3proto->destroy)
		l3proto->destroy(ct);

	l4proto = __nf_ct_l4proto_find(ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.l3num,
				       ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.protonum);
	if (l4proto && l4proto->destroy)
		l4proto->destroy(ct);

	if (nf_conntrack_destroyed)
		nf_conntrack_destroyed(ct);

	write_lock_bh(&nf_conntrack_lock);
	/* Expectations will have been removed in clean_from_lists,
	 * except that TFTP can create an expectation on the first packet,
	 * before the connection is in the list, so we need to clean here,
	 * too. */
	nf_ct_remove_expectations(ct);

	/* We overload the first tuple to link into the unconfirmed list. */
	if (!nf_ct_is_confirmed(ct)) {
		BUG_ON(list_empty(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list));
		list_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list);
	}

	NF_CT_STAT_INC(delete);
	write_unlock_bh(&nf_conntrack_lock);

	if (ct->master)
		nf_ct_put(ct->master);

	DEBUGP("destroy_conntrack: returning ct=%p to slab\n", ct);
	nf_conntrack_free(ct);
}

static void death_by_timeout(unsigned long ul_conntrack)
{
	struct nf_conn *ct = (void *)ul_conntrack;

	write_lock_bh(&nf_conntrack_lock);
	/* Inside lock so preempt is disabled on module removal path.
	 * Otherwise we can get spurious warnings. */
	NF_CT_STAT_INC(delete_list);
	clean_from_lists(ct);
	write_unlock_bh(&nf_conntrack_lock);
	nf_ct_put(ct);
}

struct nf_conntrack_tuple_hash *
__nf_conntrack_find(const struct nf_conntrack_tuple *tuple,
		    const struct nf_conn *ignored_conntrack)
{
	struct nf_conntrack_tuple_hash *h;
	unsigned int hash = hash_conntrack(tuple);

	list_for_each_entry(h, &nf_conntrack_hash[hash], list) {
		if (nf_ct_tuplehash_to_ctrack(h) != ignored_conntrack &&
		    nf_ct_tuple_equal(tuple, &h->tuple)) {
			NF_CT_STAT_INC(found);
			return h;
		}
		NF_CT_STAT_INC(searched);
	}

	return NULL;
}

/* Find a connection corresponding to a tuple. */
struct nf_conntrack_tuple_hash *
nf_conntrack_find_get(const struct nf_conntrack_tuple *tuple,
		      const struct nf_conn *ignored_conntrack)
{
	struct nf_conntrack_tuple_hash *h;

	read_lock_bh(&nf_conntrack_lock);
	h = __nf_conntrack_find(tuple, ignored_conntrack);
	if (h)
		atomic_inc(&nf_ct_tuplehash_to_ctrack(h)->ct_general.use);
	read_unlock_bh(&nf_conntrack_lock);

	return h;
}

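/*
 * Illustrative lookup (a sketch, not part of the original file):
 * nf_conntrack_find_get() returns a referenced entry, so the caller
 * must drop the reference with nf_ct_put() when done.  example_lookup
 * is a hypothetical name.
 */
#if 0
static void example_lookup(const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_tuple_hash *h;

	h = nf_conntrack_find_get(tuple, NULL);
	if (h != NULL) {
		struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);

		/* ... inspect or update ct here ... */
		nf_ct_put(ct);
	}
}
#endif
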
static void __nf_conntrack_hash_insert(struct nf_conn *ct,
				       unsigned int hash,
				       unsigned int repl_hash)
{
	ct->id = ++nf_conntrack_next_id;
	list_add(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list,
		 &nf_conntrack_hash[hash]);
	list_add(&ct->tuplehash[IP_CT_DIR_REPLY].list,
		 &nf_conntrack_hash[repl_hash]);
}

void nf_conntrack_hash_insert(struct nf_conn *ct)
{
	unsigned int hash, repl_hash;

	hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
	repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);

	write_lock_bh(&nf_conntrack_lock);
	__nf_conntrack_hash_insert(ct, hash, repl_hash);
	write_unlock_bh(&nf_conntrack_lock);
}

/* Confirm a connection given an skb; places it in the hash table */
int
__nf_conntrack_confirm(struct sk_buff **pskb)
{
	unsigned int hash, repl_hash;
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;
	struct nf_conn_help *help;
	enum ip_conntrack_info ctinfo;

	ct = nf_ct_get(*pskb, &ctinfo);

	/* ipt_REJECT uses nf_conntrack_attach to attach related
	   ICMP/TCP RST packets in the other direction.  The actual packet
	   which created the connection will be IP_CT_NEW or, for an
	   expected connection, IP_CT_RELATED. */
	if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
		return NF_ACCEPT;

	hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
	repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);

	/* We're not in the hash table, and we refuse to set up related
	   connections for unconfirmed conns.  But packet copies and
	   REJECT will give spurious warnings here. */
	/* NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 1); */

	/* No external references means no one else could have
	   confirmed us. */
	NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
	DEBUGP("Confirming conntrack %p\n", ct);

	write_lock_bh(&nf_conntrack_lock);

	/* See if there's one in the list already, including reverse:
	   NAT could have grabbed it without realizing, since we're
	   not in the hash.  If there is, we lost the race. */
	list_for_each_entry(h, &nf_conntrack_hash[hash], list)
		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
				      &h->tuple))
			goto out;
	list_for_each_entry(h, &nf_conntrack_hash[repl_hash], list)
		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
				      &h->tuple))
			goto out;

	/* Remove from unconfirmed list */
	list_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list);

	__nf_conntrack_hash_insert(ct, hash, repl_hash);
	/* Timer is relative to confirmation time, not original
	   setting time, otherwise we'd get timer wrap in
	   weird delay cases. */
	ct->timeout.expires += jiffies;
	add_timer(&ct->timeout);
	atomic_inc(&ct->ct_general.use);
	set_bit(IPS_CONFIRMED_BIT, &ct->status);
	NF_CT_STAT_INC(insert);
	write_unlock_bh(&nf_conntrack_lock);
	help = nfct_help(ct);
	if (help && help->helper)
		nf_conntrack_event_cache(IPCT_HELPER, *pskb);
#ifdef CONFIG_NF_NAT_NEEDED
	if (test_bit(IPS_SRC_NAT_DONE_BIT, &ct->status) ||
	    test_bit(IPS_DST_NAT_DONE_BIT, &ct->status))
		nf_conntrack_event_cache(IPCT_NATINFO, *pskb);
#endif
	nf_conntrack_event_cache(master_ct(ct) ?
				 IPCT_RELATED : IPCT_NEW, *pskb);
	return NF_ACCEPT;

out:
	NF_CT_STAT_INC(insert_failed);
	write_unlock_bh(&nf_conntrack_lock);
	return NF_DROP;
}

/* Returns true if a connection corresponds to the tuple (required
   for NAT). */
int
nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
			 const struct nf_conn *ignored_conntrack)
{
	struct nf_conntrack_tuple_hash *h;

	read_lock_bh(&nf_conntrack_lock);
	h = __nf_conntrack_find(tuple, ignored_conntrack);
	read_unlock_bh(&nf_conntrack_lock);

	return h != NULL;
}

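/*
 * Illustrative sketch (not from the original file) of how a NAT port
 * allocator might use nf_conntrack_tuple_taken(): probe candidate
 * tuples until an unused one is found.  The function name and the
 * port range below are hypothetical.
 */
#if 0
static int example_find_free_port(struct nf_conntrack_tuple *tuple,
				  const struct nf_conn *ignored)
{
	u_int16_t port;

	for (port = 1024; port != 0; port++) {
		tuple->src.u.tcp.port = htons(port);
		if (!nf_conntrack_tuple_taken(tuple, ignored))
			return 1;	/* tuple is unique; use it */
	}
	return 0;			/* every candidate is taken */
}
#endif
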
/* There's a small race here where we may free a just-assured
   connection.  Too bad: we're in trouble anyway. */
static int early_drop(struct list_head *chain)
{
	/* Traverse backwards: gives us oldest, which is roughly LRU */
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct = NULL, *tmp;
	int dropped = 0;

	read_lock_bh(&nf_conntrack_lock);
	list_for_each_entry_reverse(h, chain, list) {
		tmp = nf_ct_tuplehash_to_ctrack(h);
		if (!test_bit(IPS_ASSURED_BIT, &tmp->status)) {
			ct = tmp;
			atomic_inc(&ct->ct_general.use);
			break;
		}
	}
	read_unlock_bh(&nf_conntrack_lock);

	if (!ct)
		return dropped;

	if (del_timer(&ct->timeout)) {
		death_by_timeout((unsigned long)ct);
		dropped = 1;
		NF_CT_STAT_INC(early_drop);
	}
	nf_ct_put(ct);
	return dropped;
}

static struct nf_conn *
__nf_conntrack_alloc(const struct nf_conntrack_tuple *orig,
		     const struct nf_conntrack_tuple *repl,
		     const struct nf_conntrack_l3proto *l3proto)
{
	struct nf_conn *conntrack = NULL;
	u_int32_t features = 0;
	struct nf_conntrack_helper *helper;

	if (unlikely(!nf_conntrack_hash_rnd_initted)) {
		get_random_bytes(&nf_conntrack_hash_rnd, 4);
		nf_conntrack_hash_rnd_initted = 1;
	}

	/* We don't want any race condition at early drop stage */
	atomic_inc(&nf_conntrack_count);

	if (nf_conntrack_max
	    && atomic_read(&nf_conntrack_count) > nf_conntrack_max) {
		unsigned int hash = hash_conntrack(orig);
		/* Try dropping from this hash chain. */
		if (!early_drop(&nf_conntrack_hash[hash])) {
			atomic_dec(&nf_conntrack_count);
			if (net_ratelimit())
				printk(KERN_WARNING
				       "nf_conntrack: table full, dropping"
				       " packet.\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	/* find features needed by this conntrack. */
	features = l3proto->get_features(orig);

	/* FIXME: protect helper list per RCU */
	read_lock_bh(&nf_conntrack_lock);
	helper = __nf_ct_helper_find(repl);
	if (helper)
		features |= NF_CT_F_HELP;
	read_unlock_bh(&nf_conntrack_lock);

	DEBUGP("nf_conntrack_alloc: features=0x%x\n", features);

	read_lock_bh(&nf_ct_cache_lock);

	if (unlikely(!nf_ct_cache[features].use)) {
		DEBUGP("nf_conntrack_alloc: unsupported features = 0x%x\n",
		       features);
		goto out;
	}

	conntrack = kmem_cache_alloc(nf_ct_cache[features].cachep, GFP_ATOMIC);
	if (conntrack == NULL) {
		DEBUGP("nf_conntrack_alloc: Can't alloc conntrack from cache\n");
		goto out;
	}

	memset(conntrack, 0, nf_ct_cache[features].size);
	conntrack->features = features;
	atomic_set(&conntrack->ct_general.use, 1);
	conntrack->ct_general.destroy = destroy_conntrack;
	conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
	conntrack->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
	/* Don't set timer yet: wait for confirmation */
	init_timer(&conntrack->timeout);
	conntrack->timeout.data = (unsigned long)conntrack;
	conntrack->timeout.function = death_by_timeout;
	read_unlock_bh(&nf_ct_cache_lock);

	return conntrack;
out:
	read_unlock_bh(&nf_ct_cache_lock);
	atomic_dec(&nf_conntrack_count);
	return conntrack;
}

struct nf_conn *nf_conntrack_alloc(const struct nf_conntrack_tuple *orig,
				   const struct nf_conntrack_tuple *repl)
{
	struct nf_conntrack_l3proto *l3proto;

	l3proto = __nf_ct_l3proto_find(orig->src.l3num);
	return __nf_conntrack_alloc(orig, repl, l3proto);
}

void nf_conntrack_free(struct nf_conn *conntrack)
{
	u_int32_t features = conntrack->features;

	NF_CT_ASSERT(features >= NF_CT_F_BASIC && features < NF_CT_F_NUM);
	DEBUGP("nf_conntrack_free: features = 0x%x, conntrack=%p\n", features,
	       conntrack);
	kmem_cache_free(nf_ct_cache[features].cachep, conntrack);
	atomic_dec(&nf_conntrack_count);
}

/* Allocate a new conntrack: we return -ENOMEM if classification
   failed due to stress.  Otherwise it really is unclassifiable. */
static struct nf_conntrack_tuple_hash *
init_conntrack(const struct nf_conntrack_tuple *tuple,
	       struct nf_conntrack_l3proto *l3proto,
	       struct nf_conntrack_l4proto *l4proto,
	       struct sk_buff *skb,
	       unsigned int dataoff)
{
	struct nf_conn *conntrack;
	struct nf_conntrack_tuple repl_tuple;
	struct nf_conntrack_expect *exp;

	if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) {
		DEBUGP("Can't invert tuple.\n");
		return NULL;
	}

	conntrack = __nf_conntrack_alloc(tuple, &repl_tuple, l3proto);
	if (conntrack == NULL || IS_ERR(conntrack)) {
		DEBUGP("Can't allocate conntrack.\n");
		return (struct nf_conntrack_tuple_hash *)conntrack;
	}

	if (!l4proto->new(conntrack, skb, dataoff)) {
		nf_conntrack_free(conntrack);
		DEBUGP("init conntrack: can't track with proto module\n");
		return NULL;
	}

	write_lock_bh(&nf_conntrack_lock);
	exp = find_expectation(tuple);

	if (exp) {
		DEBUGP("conntrack: expectation arrives ct=%p exp=%p\n",
		       conntrack, exp);
		/* Welcome, Mr. Bond.  We've been expecting you... */
		__set_bit(IPS_EXPECTED_BIT, &conntrack->status);
		conntrack->master = exp->master;
#ifdef CONFIG_NF_CONNTRACK_MARK
		conntrack->mark = exp->master->mark;
#endif
#ifdef CONFIG_NF_CONNTRACK_SECMARK
		conntrack->secmark = exp->master->secmark;
#endif
		nf_conntrack_get(&conntrack->master->ct_general);
		NF_CT_STAT_INC(expect_new);
	} else {
		struct nf_conn_help *help = nfct_help(conntrack);

		if (help)
			help->helper = __nf_ct_helper_find(&repl_tuple);
		NF_CT_STAT_INC(new);
	}

	/* Overload tuple linked list to put us in unconfirmed list. */
	list_add(&conntrack->tuplehash[IP_CT_DIR_ORIGINAL].list, &unconfirmed);

	write_unlock_bh(&nf_conntrack_lock);

	if (exp) {
		if (exp->expectfn)
			exp->expectfn(conntrack, exp);
		nf_conntrack_expect_put(exp);
	}

	return &conntrack->tuplehash[IP_CT_DIR_ORIGINAL];
}

/* On success, returns conntrack ptr, sets skb->nfct and ctinfo */
static inline struct nf_conn *
resolve_normal_ct(struct sk_buff *skb,
		  unsigned int dataoff,
		  u_int16_t l3num,
		  u_int8_t protonum,
		  struct nf_conntrack_l3proto *l3proto,
		  struct nf_conntrack_l4proto *l4proto,
		  int *set_reply,
		  enum ip_conntrack_info *ctinfo)
{
	struct nf_conntrack_tuple tuple;
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;

	if (!nf_ct_get_tuple(skb, (unsigned int)(skb->nh.raw - skb->data),
			     dataoff, l3num, protonum, &tuple, l3proto,
			     l4proto)) {
		DEBUGP("resolve_normal_ct: Can't get tuple\n");
		return NULL;
	}

	/* look for tuple match */
	h = nf_conntrack_find_get(&tuple, NULL);
	if (!h) {
		h = init_conntrack(&tuple, l3proto, l4proto, skb, dataoff);
		if (!h)
			return NULL;
		if (IS_ERR(h))
			return (void *)h;
	}
	ct = nf_ct_tuplehash_to_ctrack(h);

	/* It exists; we have (non-exclusive) reference. */
	if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
		*ctinfo = IP_CT_ESTABLISHED + IP_CT_IS_REPLY;
		/* Please set reply bit if this packet OK */
		*set_reply = 1;
	} else {
		/* Once we've had two way comms, always ESTABLISHED. */
		if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
			DEBUGP("nf_conntrack_in: normal packet for %p\n", ct);
			*ctinfo = IP_CT_ESTABLISHED;
		} else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
			DEBUGP("nf_conntrack_in: related packet for %p\n", ct);
			*ctinfo = IP_CT_RELATED;
		} else {
			DEBUGP("nf_conntrack_in: new packet for %p\n", ct);
			*ctinfo = IP_CT_NEW;
		}
		*set_reply = 0;
	}
	skb->nfct = &ct->ct_general;
	skb->nfctinfo = *ctinfo;
	return ct;
}

unsigned int
nf_conntrack_in(int pf, unsigned int hooknum, struct sk_buff **pskb)
{
	struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	struct nf_conntrack_l3proto *l3proto;
	struct nf_conntrack_l4proto *l4proto;
	unsigned int dataoff;
	u_int8_t protonum;
	int set_reply = 0;
	int ret;

	/* Previously seen (loopback or untracked)?  Ignore. */
	if ((*pskb)->nfct) {
		NF_CT_STAT_INC(ignore);
		return NF_ACCEPT;
	}

	l3proto = __nf_ct_l3proto_find((u_int16_t)pf);
	if ((ret = l3proto->prepare(pskb, hooknum, &dataoff, &protonum)) <= 0) {
		DEBUGP("not prepared to track yet or error occurred\n");
		return -ret;
	}

	l4proto = __nf_ct_l4proto_find((u_int16_t)pf, protonum);

	/* It may be a special packet, error, unclean...
	 * the inverse of the return code tells the netfilter
	 * core what to do with the packet. */
	if (l4proto->error != NULL &&
	    (ret = l4proto->error(*pskb, dataoff, &ctinfo, pf, hooknum)) <= 0) {
		NF_CT_STAT_INC(error);
		NF_CT_STAT_INC(invalid);
		return -ret;
	}

	ct = resolve_normal_ct(*pskb, dataoff, pf, protonum, l3proto, l4proto,
			       &set_reply, &ctinfo);
	if (!ct) {
		/* Not valid part of a connection */
		NF_CT_STAT_INC(invalid);
		return NF_ACCEPT;
	}

	if (IS_ERR(ct)) {
		/* Too stressed to deal. */
		NF_CT_STAT_INC(drop);
		return NF_DROP;
	}

	NF_CT_ASSERT((*pskb)->nfct);

	ret = l4proto->packet(ct, *pskb, dataoff, ctinfo, pf, hooknum);
	if (ret < 0) {
		/* Invalid: inverse of the return code tells
		 * the netfilter core what to do */
		DEBUGP("nf_conntrack_in: Can't track with proto module\n");
		nf_conntrack_put((*pskb)->nfct);
		(*pskb)->nfct = NULL;
		NF_CT_STAT_INC(invalid);
		return -ret;
	}

	if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
		nf_conntrack_event_cache(IPCT_STATUS, *pskb);

	return ret;
}

int nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
			 const struct nf_conntrack_tuple *orig)
{
	return nf_ct_invert_tuple(inverse, orig,
				  __nf_ct_l3proto_find(orig->src.l3num),
				  __nf_ct_l4proto_find(orig->src.l3num,
						       orig->dst.protonum));
}

/* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */
void __nf_ct_refresh_acct(struct nf_conn *ct,
			  enum ip_conntrack_info ctinfo,
			  const struct sk_buff *skb,
			  unsigned long extra_jiffies,
			  int do_acct)
{
	int event = 0;

	NF_CT_ASSERT(ct->timeout.data == (unsigned long)ct);
	NF_CT_ASSERT(skb);

	write_lock_bh(&nf_conntrack_lock);

	/* Only update if this is not a fixed timeout */
	if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status)) {
		write_unlock_bh(&nf_conntrack_lock);
		return;
	}

	/* If not in hash table, timer will not be active yet */
	if (!nf_ct_is_confirmed(ct)) {
		ct->timeout.expires = extra_jiffies;
		event = IPCT_REFRESH;
	} else {
		unsigned long newtime = jiffies + extra_jiffies;

		/* Only update the timeout if the new timeout is at least
		   HZ jiffies from the old timeout.  Need del_timer for race
		   avoidance (may already be dying). */
		if (newtime - ct->timeout.expires >= HZ
		    && del_timer(&ct->timeout)) {
			ct->timeout.expires = newtime;
			add_timer(&ct->timeout);
			event = IPCT_REFRESH;
		}
	}

#ifdef CONFIG_NF_CT_ACCT
	if (do_acct) {
		ct->counters[CTINFO2DIR(ctinfo)].packets++;
		ct->counters[CTINFO2DIR(ctinfo)].bytes +=
			skb->len - (unsigned int)(skb->nh.raw - skb->data);

		if ((ct->counters[CTINFO2DIR(ctinfo)].packets & 0x80000000)
		    || (ct->counters[CTINFO2DIR(ctinfo)].bytes & 0x80000000))
			event |= IPCT_COUNTER_FILLING;
	}
#endif

	write_unlock_bh(&nf_conntrack_lock);

	/* must be unlocked when calling event cache */
	if (event)
		nf_conntrack_event_cache(event, skb);
}

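/*
 * Illustrative call site (a sketch, not from this file): an L4
 * protocol's ->packet() handler would typically extend the timeout
 * relative to "now" and account the skb in one call, e.g. with a
 * hypothetical 30 second policy:
 *
 *	nf_ct_refresh_acct(ct, ctinfo, skb, 30 * HZ);
 *
 * where nf_ct_refresh_acct() is the header wrapper that calls
 * __nf_ct_refresh_acct() with do_acct set to 1.
 */
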
#if defined(CONFIG_NF_CT_NETLINK) || \
    defined(CONFIG_NF_CT_NETLINK_MODULE)

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
#include <linux/mutex.h>

/* Generic function for tcp/udp/sctp/dccp and the like.  This needs to be
 * in nf_conntrack_core, since we don't want the protocols to autoload
 * or depend on ctnetlink */
int nf_ct_port_tuple_to_nfattr(struct sk_buff *skb,
			       const struct nf_conntrack_tuple *tuple)
{
	NFA_PUT(skb, CTA_PROTO_SRC_PORT, sizeof(u_int16_t),
		&tuple->src.u.tcp.port);
	NFA_PUT(skb, CTA_PROTO_DST_PORT, sizeof(u_int16_t),
		&tuple->dst.u.tcp.port);
	return 0;

nfattr_failure:
	return -1;
}

static const size_t cta_min_proto[CTA_PROTO_MAX] = {
	[CTA_PROTO_SRC_PORT-1]	= sizeof(u_int16_t),
	[CTA_PROTO_DST_PORT-1]	= sizeof(u_int16_t)
};

int nf_ct_port_nfattr_to_tuple(struct nfattr *tb[],
			       struct nf_conntrack_tuple *t)
{
	if (!tb[CTA_PROTO_SRC_PORT-1] || !tb[CTA_PROTO_DST_PORT-1])
		return -EINVAL;

	if (nfattr_bad_size(tb, CTA_PROTO_MAX, cta_min_proto))
		return -EINVAL;

	t->src.u.tcp.port =
		*(u_int16_t *)NFA_DATA(tb[CTA_PROTO_SRC_PORT-1]);
	t->dst.u.tcp.port =
		*(u_int16_t *)NFA_DATA(tb[CTA_PROTO_DST_PORT-1]);

	return 0;
}
#endif

/* Used by ipt_REJECT and ip6t_REJECT. */
void __nf_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb)
{
	struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;

	/* This ICMP is in reverse direction to the packet which caused it */
	ct = nf_ct_get(skb, &ctinfo);
	if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
		ctinfo = IP_CT_RELATED + IP_CT_IS_REPLY;
	else
		ctinfo = IP_CT_RELATED;

	/* Attach to new skbuff, and increment count */
	nskb->nfct = &ct->ct_general;
	nskb->nfctinfo = ctinfo;
	nf_conntrack_get(nskb->nfct);
}

static inline int
do_iter(const struct nf_conntrack_tuple_hash *i,
	int (*iter)(struct nf_conn *i, void *data),
	void *data)
{
	return iter(nf_ct_tuplehash_to_ctrack(i), data);
}

/* Bring out ya dead! */
static struct nf_conn *
get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
		void *data, unsigned int *bucket)
{
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;

	write_lock_bh(&nf_conntrack_lock);
	for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
		list_for_each_entry(h, &nf_conntrack_hash[*bucket], list) {
			ct = nf_ct_tuplehash_to_ctrack(h);
			if (iter(ct, data))
				goto found;
		}
	}
	list_for_each_entry(h, &unconfirmed, list) {
		ct = nf_ct_tuplehash_to_ctrack(h);
		if (iter(ct, data))
			goto found;
	}
	write_unlock_bh(&nf_conntrack_lock);
	return NULL;

found:
	atomic_inc(&ct->ct_general.use);
	write_unlock_bh(&nf_conntrack_lock);
	return ct;
}

void
nf_ct_iterate_cleanup(int (*iter)(struct nf_conn *i, void *data), void *data)
{
	struct nf_conn *ct;
	unsigned int bucket = 0;

	while ((ct = get_next_corpse(iter, data, &bucket)) != NULL) {
		/* Time to push up daisies... */
		if (del_timer(&ct->timeout))
			death_by_timeout((unsigned long)ct);
		/* ... else the timer will get him soon. */

		nf_ct_put(ct);
	}
}

static int kill_all(struct nf_conn *i, void *data)
{
	return 1;
}

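/*
 * Illustrative iterator callback (a sketch, not part of the original
 * file): nf_ct_iterate_cleanup() accepts any predicate, e.g. one that
 * flushes only entries of a given address family.  kill_l3proto is a
 * hypothetical name.
 */
#if 0
static int kill_l3proto(struct nf_conn *i, void *data)
{
	return i->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num ==
	       (u_int16_t)(unsigned long)data;
}

/* nf_ct_iterate_cleanup(kill_l3proto, (void *)(unsigned long)AF_INET); */
#endif
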
static void free_conntrack_hash(struct list_head *hash, int vmalloced, int size)
{
	if (vmalloced)
		vfree(hash);
	else
		free_pages((unsigned long)hash,
			   get_order(sizeof(struct list_head) * size));
}

void nf_conntrack_flush(void)
{
	nf_ct_iterate_cleanup(kill_all, NULL);
}

/* Mishearing the voices in his head, our hero wonders how he's
   supposed to kill the mall. */
void nf_conntrack_cleanup(void)
{
	int i;

	ip_ct_attach = NULL;

	/* This makes sure all current packets have passed through
	   the netfilter framework.  Roll on, two-stage module
	   delete... */
	synchronize_net();

	nf_ct_event_cache_flush();
 i_see_dead_people:
	nf_conntrack_flush();
	if (atomic_read(&nf_conntrack_count) != 0) {
		schedule();
		goto i_see_dead_people;
	}
	/* wait until all references to nf_conntrack_untracked are dropped */
	while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1)
		schedule();

	for (i = 0; i < NF_CT_F_NUM; i++) {
		if (nf_ct_cache[i].use == 0)
			continue;

		NF_CT_ASSERT(nf_ct_cache[i].use == 1);
		nf_ct_cache[i].use = 1;
		nf_conntrack_unregister_cache(i);
	}
	kmem_cache_destroy(nf_conntrack_expect_cachep);
	free_conntrack_hash(nf_conntrack_hash, nf_conntrack_vmalloc,
			    nf_conntrack_htable_size);

	nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_generic);

	/* free l3proto protocol tables */
	for (i = 0; i < PF_MAX; i++)
		if (nf_ct_protos[i]) {
			kfree(nf_ct_protos[i]);
			nf_ct_protos[i] = NULL;
		}
}

static struct list_head *alloc_hashtable(int size, int *vmalloced)
{
	struct list_head *hash;
	unsigned int i;

	*vmalloced = 0;
	hash = (void *)__get_free_pages(GFP_KERNEL,
					get_order(sizeof(struct list_head)
						  * size));
	if (!hash) {
		*vmalloced = 1;
		printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
		hash = vmalloc(sizeof(struct list_head) * size);
	}

	if (hash)
		for (i = 0; i < size; i++)
			INIT_LIST_HEAD(&hash[i]);

	return hash;
}

int set_hashsize(const char *val, struct kernel_param *kp)
{
	int i, bucket, hashsize, vmalloced;
	int old_vmalloced, old_size;
	int rnd;
	struct list_head *hash, *old_hash;
	struct nf_conntrack_tuple_hash *h;

	/* On boot, we can set this without any fancy locking. */
	if (!nf_conntrack_htable_size)
		return param_set_uint(val, kp);

	hashsize = simple_strtol(val, NULL, 0);
	if (!hashsize)
		return -EINVAL;

	hash = alloc_hashtable(hashsize, &vmalloced);
	if (!hash)
		return -ENOMEM;

	/* We have to rehash for the new table anyway, so we can also use
	 * a new random seed */
	get_random_bytes(&rnd, 4);

	write_lock_bh(&nf_conntrack_lock);
	for (i = 0; i < nf_conntrack_htable_size; i++) {
		while (!list_empty(&nf_conntrack_hash[i])) {
			h = list_entry(nf_conntrack_hash[i].next,
				       struct nf_conntrack_tuple_hash, list);
			list_del(&h->list);
			bucket = __hash_conntrack(&h->tuple, hashsize, rnd);
			list_add_tail(&h->list, &hash[bucket]);
		}
	}
	old_size = nf_conntrack_htable_size;
	old_vmalloced = nf_conntrack_vmalloc;
	old_hash = nf_conntrack_hash;

	nf_conntrack_htable_size = hashsize;
	nf_conntrack_vmalloc = vmalloced;
	nf_conntrack_hash = hash;
	nf_conntrack_hash_rnd = rnd;
	write_unlock_bh(&nf_conntrack_lock);

	free_conntrack_hash(old_hash, old_vmalloced, old_size);
	return 0;
}

module_param_call(hashsize, set_hashsize, param_get_uint,
		  &nf_conntrack_htable_size, 0600);

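/*
 * Illustrative runtime use (not part of the original file): with the
 * parameter registered above under mode 0600, the hash size can be set
 * at load time or, because set_hashsize() rehashes live entries,
 * changed on a running system (paths assume the standard module build):
 *
 *	modprobe nf_conntrack hashsize=16384
 *	echo 16384 > /sys/module/nf_conntrack/parameters/hashsize
 */
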
int __init nf_conntrack_init(void)
{
	unsigned int i;
	int ret;

	/* Idea from tcp.c: use 1/16384 of memory.  On i386: a 32MB
	 * machine has 256 buckets.  >= 1GB machines have 8192 buckets. */
	if (!nf_conntrack_htable_size) {
		nf_conntrack_htable_size
			= (((num_physpages << PAGE_SHIFT) / 16384)
			   / sizeof(struct list_head));
		if (num_physpages > (1024 * 1024 * 1024 / PAGE_SIZE))
			nf_conntrack_htable_size = 8192;
		if (nf_conntrack_htable_size < 16)
			nf_conntrack_htable_size = 16;
	}
	nf_conntrack_max = 8 * nf_conntrack_htable_size;

	printk("nf_conntrack version %s (%u buckets, %d max)\n",
	       NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
	       nf_conntrack_max);

	nf_conntrack_hash = alloc_hashtable(nf_conntrack_htable_size,
					    &nf_conntrack_vmalloc);
	if (!nf_conntrack_hash) {
		printk(KERN_ERR "Unable to create nf_conntrack_hash\n");
		goto err_out;
	}

	ret = nf_conntrack_register_cache(NF_CT_F_BASIC, "nf_conntrack:basic",
					  sizeof(struct nf_conn));
	if (ret < 0) {
		printk(KERN_ERR "Unable to create nf_conn slab cache\n");
		goto err_free_hash;
	}

	nf_conntrack_expect_cachep = kmem_cache_create("nf_conntrack_expect",
					sizeof(struct nf_conntrack_expect),
					0, 0, NULL, NULL);
	if (!nf_conntrack_expect_cachep) {
		printk(KERN_ERR "Unable to create nf_expect slab cache\n");
		goto err_free_conntrack_slab;
	}

	ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_generic);
	if (ret < 0)
		goto out_free_expect_slab;

	/* Don't NEED lock here, but good form anyway. */
	write_lock_bh(&nf_conntrack_lock);
	for (i = 0; i < AF_MAX; i++)
		nf_ct_l3protos[i] = &nf_conntrack_l3proto_generic;
	write_unlock_bh(&nf_conntrack_lock);

	/* For use by REJECT target */
	ip_ct_attach = __nf_conntrack_attach;

	/* Set up fake conntrack:
	    - to never be deleted, not in any hashes */
	atomic_set(&nf_conntrack_untracked.ct_general.use, 1);
	/*  - and make it look like a confirmed connection */
	set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status);

	return ret;

out_free_expect_slab:
	kmem_cache_destroy(nf_conntrack_expect_cachep);
err_free_conntrack_slab:
	nf_conntrack_unregister_cache(NF_CT_F_BASIC);
err_free_hash:
	free_conntrack_hash(nf_conntrack_hash, nf_conntrack_vmalloc,
			    nf_conntrack_htable_size);
err_out:
	return -ENOMEM;
}