Complete libiptc rewrite.  Time to load 10k rules drops from 2.20 minutes to 1.255 seconds (!).  Might still contain bugs; use with caution.
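
Rough sketch of the idea, for reviewers who do not want to read the whole diff: the old code kept the ruleset only as the flat kernel blob, so every insert/append moved the rest of the blob, patched jump offsets, and fixed up (or rebuilt) the chain cache. The new code parses the blob once into a linked-list cache (struct chain_head entries, each holding a list of struct rule_head) and only compiles it back into a blob at commit time. The toy program below only illustrates that parse/modify/compile split; all names in it are made up and it is not libiptc code:

/* Minimal illustration (NOT libiptc code): append rules into a per-chain
 * linked list, then assign blob offsets in one linear pass, mimicking the
 * parse -> modify -> compile flow introduced by this rewrite. */
#include <stdio.h>
#include <stdlib.h>

struct toy_rule {
	struct toy_rule *next;
	unsigned int size;	/* pretend blob size of this entry */
	unsigned int offset;	/* filled in by the "compile" pass */
};

struct toy_chain {
	char name[32];
	struct toy_rule *head, *tail;
	unsigned int num_rules;
};

/* O(1) append -- the old code instead memmove()d the whole blob per rule. */
static void toy_append(struct toy_chain *c, unsigned int size)
{
	struct toy_rule *r = calloc(1, sizeof(*r));
	if (!r)
		exit(1);
	r->size = size;
	if (c->tail)
		c->tail->next = r;
	else
		c->head = r;
	c->tail = r;
	c->num_rules++;
}

/* One linear pass assigns blob offsets, roughly what
 * iptcc_compile_chain_offsets() does for the real cache. */
static unsigned int toy_compile_offsets(struct toy_chain *c)
{
	unsigned int off = 0;
	for (struct toy_rule *r = c->head; r; r = r->next) {
		r->offset = off;
		off += r->size;
	}
	return off;	/* total blob size consumed by this chain */
}

int main(void)
{
	struct toy_chain c = { .name = "forward" };

	for (int i = 0; i < 10000; i++)
		toy_append(&c, 152);	/* ~ size of a basic entry + std target */

	printf("%s: %u rules, blob size %u\n",
	       c.name, c.num_rules, toy_compile_offsets(&c));
	return 0;
}

In the real patch the lists use struct list_head from the imported linux_list.h, and the compile step additionally emits chain headers/footers and fixes up jump verdicts (iptcc_compile_rule() / iptcc_compile_chain()).
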
diff --git a/libiptc/libip4tc.c b/libiptc/libip4tc.c
index d394315..399d591 100644
--- a/libiptc/libip4tc.c
+++ b/libiptc/libip4tc.c
@@ -129,8 +129,8 @@
 	size_t i;
 	STRUCT_ENTRY_TARGET *t;
 
-	printf("Entry %u (%lu):\n", entry2index(handle, e),
-	       entry2offset(handle, e));
+	printf("Entry %u (%lu):\n", iptcb_entry2index(handle, e),
+	       iptcb_entry2offset(handle, e));
 	printf("SRC IP: %u.%u.%u.%u/%u.%u.%u.%u\n",
 	       IP_PARTS(e->ip.src.s_addr),IP_PARTS(e->ip.smsk.s_addr));
 	printf("DST IP: %u.%u.%u.%u/%u.%u.%u.%u\n",
@@ -238,6 +238,7 @@
    	return 1;
 }
 
+#if 0
 /***************************** DEBUGGING ********************************/
 static inline int
 unconditional(const struct ipt_ip *ip)
@@ -292,20 +293,20 @@
 		assert(t->verdict == -NF_DROP-1
 		       || t->verdict == -NF_ACCEPT-1
 		       || t->verdict == RETURN
-		       || t->verdict < (int)h->entries.size);
+		       || t->verdict < (int)h->entries->size);
 
 		if (t->verdict >= 0) {
 			STRUCT_ENTRY *te = get_entry(h, t->verdict);
 			int idx;
 
-			idx = entry2index(h, te);
+			idx = iptcb_entry2index(h, te);
 			assert(strcmp(GET_TARGET(te)->u.user.name,
 				      IPT_ERROR_TARGET)
 			       != 0);
 			assert(te != e);
 
 			/* Prior node must be error node, or this node. */
-			assert(t->verdict == entry2offset(h, e)+e->next_offset
+			assert(t->verdict == iptcb_entry2offset(h, e)+e->next_offset
 			       || strcmp(GET_TARGET(index2entry(h, idx-1))
 					 ->u.user.name, IPT_ERROR_TARGET)
 			       == 0);
@@ -518,3 +519,5 @@
 		      ERROR_TARGET) == 0);
 }
 #endif /*IPTC_DEBUG*/
+
+#endif
diff --git a/libiptc/libip6tc.c b/libiptc/libip6tc.c
index 6400ce3..c915ccb 100644
--- a/libiptc/libip6tc.c
+++ b/libiptc/libip6tc.c
@@ -136,8 +136,8 @@
 	int len;
 	struct ip6t_entry_target *t;
 	
-	printf("Entry %u (%lu):\n", entry2index(handle, e),
-	       entry2offset(handle, e));
+	printf("Entry %u (%lu):\n", iptcb_entry2index(handle, e),
+	       iptcb_entry2offset(handle, e));
 	puts("SRC IP: ");
 	inet_ntop(AF_INET6, &e->ipv6.src, buf, sizeof buf);
 	puts(buf);
@@ -428,8 +428,8 @@
 		assert(t->verdict == -NF_DROP-1 || t->verdict == -NF_ACCEPT-1);
 
 		/* Hooks and underflows must be valid entries */
-		entry2index(h, get_entry(h, h->info.hook_entry[i]));
-		entry2index(h, get_entry(h, h->info.underflow[i]));
+		iptcb_entry2index(h, get_entry(h, h->info.hook_entry[i]));
+		iptcb_entry2index(h, get_entry(h, h->info.underflow[i]));
 	}
 
 	assert(h->info.size
diff --git a/libiptc/libiptc.c b/libiptc/libiptc.c
index b29111d..bf32617 100644
--- a/libiptc/libiptc.c
+++ b/libiptc/libiptc.c
@@ -1,4 +1,4 @@
-/* Library which manipulates firewall rules.  Version $Revision: 1.46 $ */
+/* Library which manipulates firewall rules.  Version $Revision: 1.39 $ */
 
 /* Architecture of firewall rules is as follows:
  *
@@ -10,7 +10,7 @@
 
 /* (C) 1999 Paul ``Rusty'' Russell - Placed under the GNU GPL (See
  * COPYING for details). 
- * (C) 2000-2003 by the Netfilter Core Team <coreteam@netfilter.org>
+ * (C) 2000-2004 by the Netfilter Core Team <coreteam@netfilter.org>
  *
  * 2003-Jun-20: Harald Welte <laforge@netfilter.org>:
  *	- Reimplementation of chain cache to use offsets instead of entries
@@ -18,11 +18,28 @@
  * 	- performance optimization, sponsored by Astaro AG (http://www.astaro.com/)
  * 	  don't rebuild the chain cache after every operation, instead fix it
  * 	  up after a ruleset change.  
+ * 2004-Aug-18: Harald Welte <laforge@netfilter.org>:
+ * 	- further performance work: total reimplementation of libiptc.
+ * 	- libiptc now has a real internal (linked-list) representation of the
+ * 	  ruleset and a parser/compiler from/to this internal representation
+ * 	- again sponsored by Astaro AG (http://www.astaro.com/)
  */
-
 #include <sys/types.h>
 #include <sys/socket.h>
 
+#include "linux_list.h"
+
+//#define IPTC_DEBUG2 1
+
+#ifdef IPTC_DEBUG2
+#include <fcntl.h>
+#define DEBUGP(x, args...)	fprintf(stderr, "%s: " x, __FUNCTION__, ## args)
+#define DEBUGP_C(x, args...)	fprintf(stderr, x, ## args)
+#else
+#define DEBUGP(x, args...)
+#define DEBUGP_C(x, args...)
+#endif
+
 #ifndef IPT_LIB_DIR
 #define IPT_LIB_DIR "/usr/local/lib/iptables"
 #endif
@@ -49,6 +66,16 @@
 #endif
 };
 
+/* Convenience structures */
+struct ipt_error_target
+{
+	STRUCT_ENTRY_TARGET t;
+	char error[TABLE_MAXNAMELEN];
+};
+
+struct chain_head;
+struct rule_head;
+
 struct counter_map
 {
 	enum {
@@ -60,49 +87,92 @@
 	unsigned int mappos;
 };
 
-/* Convenience structures */
-struct ipt_error_target
-{
-	STRUCT_ENTRY_TARGET t;
-	char error[TABLE_MAXNAMELEN];
+enum iptcc_rule_type {
+	IPTCC_R_STANDARD,		/* standard target (ACCEPT, ...) */
+	IPTCC_R_MODULE,			/* extension module (SNAT, ...) */
+	IPTCC_R_FALLTHROUGH,		/* fallthrough rule */
+	IPTCC_R_JUMP,			/* jump to other chain */
 };
 
-struct chain_cache
+struct rule_head
 {
+	struct list_head list;
+	struct chain_head *chain;
+	struct counter_map counter_map;
+
+	unsigned int index;		/* index (needed for counter_map) */
+	unsigned int offset;		/* offset in rule blob */
+
+	enum iptcc_rule_type type;
+	struct chain_head *jump;	/* jump target, if IPTCC_R_JUMP */
+
+	unsigned int size;		/* size of entry data */
+	STRUCT_ENTRY entry[0];
+};
+
+struct chain_head
+{
+	struct list_head list;
 	char name[TABLE_MAXNAMELEN];
-	/* This is the first rule in chain. */
-	unsigned int start_off;
-	/* Last rule in chain */
-	unsigned int end_off;
+	unsigned int hooknum;		/* hook number+1 if builtin */
+	unsigned int references;	/* how many jumps reference us */
+	int verdict;			/* verdict if builtin */
+
+	STRUCT_COUNTERS counters;	/* per-chain counters */
+	struct counter_map counter_map;
+
+	unsigned int num_rules;		/* number of rules in list */
+	struct list_head rules;		/* list of rules */
+
+	unsigned int index;		/* index (needed for jump resolution) */
+	unsigned int head_offset;	/* offset in rule blob */
+	unsigned int foot_index;	/* index (needed for counter_map) */
+	unsigned int foot_offset;	/* offset in rule blob */
 };
 
 STRUCT_TC_HANDLE
 {
-	/* Have changes been made? */
-	int changed;
-	/* Size in here reflects original state. */
+	int changed;			 /* Have changes been made? */
+
+	struct list_head chains;
+	
+	struct chain_head *chain_iterator_cur;
+	struct rule_head *rule_iterator_cur;
+
 	STRUCT_GETINFO info;
-
-	struct counter_map *counter_map;
-	/* Array of hook names */
-	const char **hooknames;
-
-	/* Cached position of chain heads (NULL = no cache). */
-	unsigned int cache_num_chains;
-	unsigned int cache_num_builtins;
-	struct chain_cache *cache_chain_heads;
-
-	/* Chain iterator: current chain cache entry. */
-	struct chain_cache *cache_chain_iteration;
-
-	/* Rule iterator: terminal rule */
-	STRUCT_ENTRY *cache_rule_end;
-
-	/* Number in here reflects current state. */
-	unsigned int new_number;
-	STRUCT_GET_ENTRIES entries;
+	STRUCT_GET_ENTRIES *entries;
 };
 
+/* allocate a new chain head for the cache */
+static struct chain_head *iptcc_alloc_chain_head(const char *name, int hooknum)
+{
+	struct chain_head *c = malloc(sizeof(*c));
+	if (!c)
+		return NULL;
+	memset(c, 0, sizeof(*c));
+
+	strncpy(c->name, name, TABLE_MAXNAMELEN);
+	c->hooknum = hooknum;
+	INIT_LIST_HEAD(&c->rules);
+
+	return c;
+}
+
+/* allocate and initialize a new rule for the cache */
+static struct rule_head *iptcc_alloc_rule(struct chain_head *c, unsigned int size)
+{
+	struct rule_head *r = malloc(sizeof(*r)+size);
+	if (!r)
+		return NULL;
+	memset(r, 0, sizeof(*r));
+
+	r->chain = c;
+	r->size = size;
+
+	return r;
+}
+
+/* notify us that the ruleset has been modified by the user */
 static void
 set_changed(TC_HANDLE_T h)
 {
@@ -116,8 +186,13 @@
 #define CHECK(h)
 #endif
 
+
+/**********************************************************************
+ * iptc blob utility functions (iptcb_*)
+ **********************************************************************/
+
 static inline int
-get_number(const STRUCT_ENTRY *i,
+iptcb_get_number(const STRUCT_ENTRY *i,
 	   const STRUCT_ENTRY *seek,
 	   unsigned int *pos)
 {
@@ -127,22 +202,8 @@
 	return 0;
 }
 
-static unsigned int
-entry2index(const TC_HANDLE_T h, const STRUCT_ENTRY *seek)
-{
-	unsigned int pos = 0;
-
-	if (ENTRY_ITERATE(h->entries.entrytable, h->entries.size,
-			  get_number, seek, &pos) == 0) {
-		fprintf(stderr, "ERROR: offset %u not an entry!\n",
-			(unsigned int)((char *)seek - (char *)h->entries.entrytable));
-		abort();
-	}
-	return pos;
-}
-
 static inline int
-get_entry_n(STRUCT_ENTRY *i,
+iptcb_get_entry_n(STRUCT_ENTRY *i,
 	    unsigned int number,
 	    unsigned int *pos,
 	    STRUCT_ENTRY **pe)
@@ -155,63 +216,521 @@
 	return 0;
 }
 
-static STRUCT_ENTRY *
-index2entry(TC_HANDLE_T h, unsigned int index)
+static inline STRUCT_ENTRY *
+iptcb_get_entry(TC_HANDLE_T h, unsigned int offset)
+{
+	return (STRUCT_ENTRY *)((char *)h->entries->entrytable + offset);
+}
+
+static unsigned int
+iptcb_entry2index(const TC_HANDLE_T h, const STRUCT_ENTRY *seek)
 {
 	unsigned int pos = 0;
-	STRUCT_ENTRY *ret = NULL;
 
-	ENTRY_ITERATE(h->entries.entrytable, h->entries.size,
-		      get_entry_n, index, &pos, &ret);
-
-	return ret;
+	if (ENTRY_ITERATE(h->entries->entrytable, h->entries->size,
+			  iptcb_get_number, seek, &pos) == 0) {
+		fprintf(stderr, "ERROR: offset %u not an entry!\n",
+			(unsigned int)((char *)seek - (char *)h->entries->entrytable));
+		abort();
+	}
+	return pos;
 }
 
 static inline STRUCT_ENTRY *
-get_entry(TC_HANDLE_T h, unsigned int offset)
+iptcb_offset2entry(TC_HANDLE_T h, unsigned int offset)
 {
-	return (STRUCT_ENTRY *)((char *)h->entries.entrytable + offset);
+	return (STRUCT_ENTRY *) ((void *)h->entries->entrytable+offset);
 }
 
+
 static inline unsigned long
-entry2offset(const TC_HANDLE_T h, const STRUCT_ENTRY *e)
+iptcb_entry2offset(const TC_HANDLE_T h, const STRUCT_ENTRY *e)
 {
-	return (void *)e - (void *)h->entries.entrytable;
-}
-
-static inline unsigned long
-index2offset(TC_HANDLE_T h, unsigned int index)
-{
-	return entry2offset(h, index2entry(h, index));
-}
-
-static inline STRUCT_ENTRY *
-offset2entry(TC_HANDLE_T h, unsigned int offset)
-{
-	return (STRUCT_ENTRY *) ((void *)h->entries.entrytable+offset);
+	return (void *)e - (void *)h->entries->entrytable;
 }
 
 static inline unsigned int
-offset2index(const TC_HANDLE_T h, unsigned int offset)
+iptcb_offset2index(const TC_HANDLE_T h, unsigned int offset)
 {
-	return entry2index(h, offset2entry(h, offset));
+	return iptcb_entry2index(h, iptcb_offset2entry(h, offset));
+}
+
+/* Returns 0 if not hook entry, else hooknumber + 1 */
+static inline unsigned int
+iptcb_ent_is_hook_entry(STRUCT_ENTRY *e, TC_HANDLE_T h)
+{
+	unsigned int i;
+
+	for (i = 0; i < NUMHOOKS; i++) {
+		if ((h->info.valid_hooks & (1 << i))
+		    && iptcb_get_entry(h, h->info.hook_entry[i]) == e)
+			return i+1;
+	}
+	return 0;
 }
 
 
-static const char *
-get_errorlabel(TC_HANDLE_T h, unsigned int offset)
-{
-	STRUCT_ENTRY *e;
+/**********************************************************************
+ * iptc cache utility functions (iptcc_*)
+ **********************************************************************/
 
-	e = get_entry(h, offset);
-	if (strcmp(GET_TARGET(e)->u.user.name, ERROR_TARGET) != 0) {
-		fprintf(stderr, "ERROR: offset %u not an error node!\n",
-			offset);
-		abort();
+/* Is the given chain builtin (1) or user-defined (0) */
+static unsigned int iptcc_is_builtin(struct chain_head *c)
+{
+	return (c->hooknum ? 1 : 0);
+}
+
+/* Get a specific rule within a chain */
+static struct rule_head *iptcc_get_rule_num(struct chain_head *c,
+					    unsigned int rulenum)
+{
+	struct rule_head *r;
+	unsigned int num = 0;
+
+	list_for_each_entry(r, &c->rules, list) {
+		num++;
+		if (num == rulenum)
+			return r;
+	}
+	return NULL;
+}
+
+/* Returns chain head if found, otherwise NULL. */
+static struct chain_head *
+iptcc_find_chain_by_offset(TC_HANDLE_T handle, unsigned int offset)
+{
+	struct list_head *pos;
+
+	if (list_empty(&handle->chains))
+		return NULL;
+
+	list_for_each(pos, &handle->chains) {
+		struct chain_head *c = list_entry(pos, struct chain_head, list);
+		if (offset >= c->head_offset && offset <= c->foot_offset)
+			return c;
 	}
 
-	return (const char *)GET_TARGET(e)->data;
+	return NULL;
 }
+/* Returns chain head if found, otherwise NULL. */
+static struct chain_head *
+iptcc_find_label(const char *name, TC_HANDLE_T handle)
+{
+	struct list_head *pos;
+
+	if (list_empty(&handle->chains))
+		return NULL;
+
+	list_for_each(pos, &handle->chains) {
+		struct chain_head *c = list_entry(pos, struct chain_head, list);
+		if (!strcmp(c->name, name))
+			return c;
+	}
+
+	return NULL;
+}
+
+/* called when rule is to be removed from cache */
+static void iptcc_delete_rule(struct rule_head *r)
+{
+	DEBUGP("deleting rule %p (offset %u)\n", r, r->offset);
+	/* clean up reference count of called chain */
+	if (r->type == IPTCC_R_JUMP
+	    && r->jump)
+		r->jump->references--;
+
+	list_del(&r->list);
+	free(r);
+}
+
+
+/**********************************************************************
+ * RULESET PARSER (blob -> cache)
+ **********************************************************************/
+
+static int alphasort(const void *a, const void *b)
+{
+	return strcmp(((struct chain_head *)a)->name,
+		      ((struct chain_head *)b)->name);
+}
+
+/* Delete policy rule of previous chain, since cache doesn't contain
+ * chain policy rules.
+ * WARNING: this function has an ugly design and relies on a lot of context;
+ * it must only be called from specific places within the parser */
+static int __iptcc_p_del_policy(TC_HANDLE_T h, unsigned int num)
+{
+	if (h->chain_iterator_cur) {
+		/* policy rule is last rule */
+		struct rule_head *pr = (struct rule_head *)
+			h->chain_iterator_cur->rules.prev;
+
+		/* save verdict */
+		h->chain_iterator_cur->verdict = 
+			*(int *)GET_TARGET(pr->entry)->data;
+
+		/* save counter and counter_map information */
+		h->chain_iterator_cur->counter_map.maptype = 
+						COUNTER_MAP_NORMAL_MAP;
+		h->chain_iterator_cur->counter_map.mappos = num-1;
+		memcpy(&h->chain_iterator_cur->counters, &pr->entry->counters, 
+			sizeof(h->chain_iterator_cur->counters));
+
+		/* foot_offset points to verdict rule */
+		h->chain_iterator_cur->foot_index = num;
+		h->chain_iterator_cur->foot_offset = pr->offset;
+
+		/* delete rule from cache */
+		iptcc_delete_rule(pr);
+
+		return 1;
+	}
+	return 0;
+}
+
+/* Another ugly helper function, split out of cache_add_entry to keep that
+ * function from turning into spaghetti code */
+static void __iptcc_p_add_chain(TC_HANDLE_T h, struct chain_head *c,
+				unsigned int offset, unsigned int *num)
+{
+	__iptcc_p_del_policy(h, *num);
+
+	c->head_offset = offset;
+	c->index = *num;
+
+	list_add_tail(&c->list, &h->chains);
+	h->chain_iterator_cur = c;
+}
+
+/* main parser function: add an entry from the blob to the cache */
+static int cache_add_entry(STRUCT_ENTRY *e, 
+			   TC_HANDLE_T h, 
+			   STRUCT_ENTRY **prev,
+			   unsigned int *num)
+{
+	unsigned int builtin;
+	unsigned int offset = (char *)e - (char *)h->entries->entrytable;
+
+	DEBUGP("entering...");
+
+	/* Last entry ("policy rule"). End it.*/
+	if (iptcb_entry2offset(h,e) + e->next_offset == h->entries->size) {
+		/* This is the ERROR node at the end of the table */
+		DEBUGP_C("%u:%u: end of table:\n", *num, offset);
+
+		__iptcc_p_del_policy(h, *num);
+
+		h->chain_iterator_cur = NULL;
+		goto out_inc;
+	}
+
+	/* We know this is the start of a new chain if it's an ERROR
+	 * target, or a hook entry point */
+
+	if (strcmp(GET_TARGET(e)->u.user.name, ERROR_TARGET) == 0) {
+		struct chain_head *c = 
+			iptcc_alloc_chain_head((const char *)GET_TARGET(e)->data, 0);
+		DEBUGP_C("%u:%u:new userdefined chain %s: %p\n", *num, offset, 
+			(char *)c->name, c);
+		if (!c) {
+			errno = -ENOMEM;
+			return -1;
+		}
+
+		__iptcc_p_add_chain(h, c, offset, num);
+
+	} else if ((builtin = iptcb_ent_is_hook_entry(e, h)) != 0) {
+		struct chain_head *c =
+			iptcc_alloc_chain_head((char *)hooknames[builtin-1], 
+						builtin);
+		DEBUGP_C("%u:%u new builtin chain: %p (rules=%p)\n", 
+			*num, offset, c, &c->rules);
+		if (!c) {
+			errno = -ENOMEM;
+			return -1;
+		}
+
+		c->hooknum = builtin;
+
+		__iptcc_p_add_chain(h, c, offset, num);
+
+		/* FIXME: this is ugly. */
+		goto new_rule;
+	} else {
+		/* has to be normal rule */
+		struct rule_head *r;
+new_rule:
+
+		if (!(r = iptcc_alloc_rule(h->chain_iterator_cur, 
+					   e->next_offset))) {
+			errno = ENOMEM;
+			return -1;
+		}
+		DEBUGP_C("%u:%u normal rule: %p: ", *num, offset, r);
+
+		r->index = *num;
+		r->offset = offset;
+		memcpy(r->entry, e, e->next_offset);
+		r->counter_map.maptype = COUNTER_MAP_NORMAL_MAP;
+		r->counter_map.mappos = r->index;
+
+		/* handling of jumps, etc. */
+		if (!strcmp(GET_TARGET(e)->u.user.name, STANDARD_TARGET)) {
+			STRUCT_STANDARD_TARGET *t;
+
+			t = (STRUCT_STANDARD_TARGET *)GET_TARGET(e);
+			if (t->target.u.target_size
+			    != ALIGN(sizeof(STRUCT_STANDARD_TARGET))) {
+				errno = EINVAL;
+				return -1;
+			}
+
+			if (t->verdict < 0) {
+				DEBUGP_C("standard, verdict=%d\n", t->verdict);
+				r->type = IPTCC_R_STANDARD;
+			} else if (t->verdict == r->offset+e->next_offset) {
+				DEBUGP_C("fallthrough\n");
+				r->type = IPTCC_R_FALLTHROUGH;
+			} else {
+				DEBUGP_C("jump, target=%u\n", t->verdict);
+				r->type = IPTCC_R_JUMP;
+				/* Jump target fixup has to be deferred
+				 * until the second pass, since we might not
+				 * yet have parsed the target */
+			}
+		}
+
+		list_add_tail(&r->list, &h->chain_iterator_cur->rules);
+	}
+out_inc:
+	(*num)++;
+	return 0;
+}
+
+
+/* parse an iptables blob into its pieces */
+static int parse_table(TC_HANDLE_T h)
+{
+	STRUCT_ENTRY *prev;
+	unsigned int num = 0;
+	struct chain_head *c;
+
+	/* First pass: over ruleset blob */
+	ENTRY_ITERATE(h->entries->entrytable, h->entries->size,
+			cache_add_entry, h, &prev, &num);
+
+	/* Second pass: fixup parsed data from first pass */
+	list_for_each_entry(c, &h->chains, list) {
+		struct rule_head *r;
+		list_for_each_entry(r, &c->rules, list) {
+			struct chain_head *c;
+			STRUCT_STANDARD_TARGET *t;
+
+			if (r->type != IPTCC_R_JUMP)
+				continue;
+
+			t = (STRUCT_STANDARD_TARGET *)GET_TARGET(r->entry);
+			c = iptcc_find_chain_by_offset(h, t->verdict);
+			if (!c)
+				return -1;
+			r->jump = c;
+			c->references++;
+		}
+	}
+
+	/* FIXME: sort chains */
+
+	return 1;
+}
+
+
+/**********************************************************************
+ * RULESET COMPILATION (cache -> blob)
+ **********************************************************************/
+
+/* Convenience structures */
+struct iptcb_chain_start{
+	STRUCT_ENTRY e;
+	struct ipt_error_target name;
+};
+#define IPTCB_CHAIN_START_SIZE	(sizeof(STRUCT_ENTRY) +			\
+				 ALIGN(sizeof(struct ipt_error_target)))
+
+struct iptcb_chain_foot {
+	STRUCT_ENTRY e;
+	STRUCT_STANDARD_TARGET target;
+};
+#define IPTCB_CHAIN_FOOT_SIZE	(sizeof(STRUCT_ENTRY) +			\
+				 ALIGN(sizeof(STRUCT_STANDARD_TARGET)))
+
+struct iptcb_chain_error {
+	STRUCT_ENTRY entry;
+	struct ipt_error_target target;
+};
+#define IPTCB_CHAIN_ERROR_SIZE	(sizeof(STRUCT_ENTRY) +			\
+				 ALIGN(sizeof(struct ipt_error_target)))
+
+
+
+/* compile rule from cache into blob */
+static inline int iptcc_compile_rule (TC_HANDLE_T h, STRUCT_REPLACE *repl, struct rule_head *r)
+{
+	/* handle jumps */
+	if (r->type == IPTCC_R_JUMP) {
+		STRUCT_STANDARD_TARGET *t;
+		t = (STRUCT_STANDARD_TARGET *)GET_TARGET(r->entry);
+		/* memset for memcmp convenience on delete/replace */
+		memset(t->target.u.user.name, 0, FUNCTION_MAXNAMELEN);
+		strcpy(t->target.u.user.name, STANDARD_TARGET);
+		/* Jumps can only happen to user-defined chains, so we
+		 * can safely assume that they always have a header */
+		t->verdict = r->jump->head_offset + IPTCB_CHAIN_START_SIZE;
+	} else if (r->type == IPTCC_R_FALLTHROUGH) {
+		STRUCT_STANDARD_TARGET *t;
+		t = (STRUCT_STANDARD_TARGET *)GET_TARGET(r->entry);
+		t->verdict = r->offset + r->size;
+	}
+	
+	/* copy entry from cache to blob */
+	memcpy((char *)repl->entries+r->offset, r->entry, r->size);
+
+	return 1;
+}
+
+/* compile chain from cache into blob */
+static int iptcc_compile_chain(TC_HANDLE_T h, STRUCT_REPLACE *repl, struct chain_head *c)
+{
+	int ret;
+	struct rule_head *r;
+	struct iptcb_chain_start *head;
+	struct iptcb_chain_foot *foot;
+
+	/* only user-defined chains have a header */
+	if (!iptcc_is_builtin(c)) {
+		/* put chain header in place */
+		head = (void *)repl->entries + c->head_offset;
+		head->e.target_offset = sizeof(STRUCT_ENTRY);
+		head->e.next_offset = IPTCB_CHAIN_START_SIZE;
+		strcpy(head->name.t.u.user.name, ERROR_TARGET);
+		head->name.t.u.target_size = 
+				ALIGN(sizeof(struct ipt_error_target));
+		strcpy(head->name.error, c->name);
+	} else {
+		repl->hook_entry[c->hooknum-1] = c->head_offset;	
+		repl->underflow[c->hooknum-1] = c->foot_offset;
+	}
+
+	/* iterate over rules */
+	list_for_each_entry(r, &c->rules, list) {
+		ret = iptcc_compile_rule(h, repl, r);
+		if (ret < 0)
+			return ret;
+	}
+
+	/* put chain footer in place */
+	foot = (void *)repl->entries + c->foot_offset;
+	foot->e.target_offset = sizeof(STRUCT_ENTRY);
+	foot->e.next_offset = IPTCB_CHAIN_FOOT_SIZE;
+	strcpy(foot->target.target.u.user.name, STANDARD_TARGET);
+	foot->target.target.u.target_size =
+				ALIGN(sizeof(STRUCT_STANDARD_TARGET));
+	/* builtin targets have verdict, others return */
+	if (iptcc_is_builtin(c))
+		foot->target.verdict = c->verdict;
+	else
+		foot->target.verdict = RETURN;
+	/* set policy-counters */
+	memcpy(&foot->e.counters, &c->counters, sizeof(STRUCT_COUNTERS));
+
+	return 0;
+}
+
+/* calculate offset and number for every rule in the cache */
+static int iptcc_compile_chain_offsets(TC_HANDLE_T h, struct chain_head *c,
+				       int *offset, int *num)
+{
+	struct rule_head *r;
+
+	c->head_offset = *offset;
+	DEBUGP("%s: chain_head %u, offset=%u\n", c->name, *num, *offset);
+
+	if (!iptcc_is_builtin(c))  {
+		/* Chain has header */
+		*offset += sizeof(STRUCT_ENTRY) 
+			     + ALIGN(sizeof(struct ipt_error_target));
+		(*num)++;
+	}
+
+	list_for_each_entry(r, &c->rules, list) {
+		DEBUGP("rule %u, offset=%u, index=%u\n", *num, *offset, *num);
+		r->offset = *offset;
+		r->index = *num;
+		*offset += r->size;
+		(*num)++;
+	}
+
+	DEBUGP("%s; chain_foot %u, offset=%u, index=%u\n", c->name, *num, 
+		*offset, *num);
+	c->foot_offset = *offset;
+	c->foot_index = *num;
+	*offset += sizeof(STRUCT_ENTRY)
+		   + ALIGN(sizeof(STRUCT_STANDARD_TARGET));
+	(*num)++;
+
+	return 1;
+}
+
+/* put the pieces back together again */
+static int iptcc_compile_table_prep(TC_HANDLE_T h, unsigned int *size)
+{
+	struct chain_head *c;
+	unsigned int offset = 0, num = 0;
+	int ret = 0;
+
+	/* First pass: calculate offset for every rule */
+	list_for_each_entry(c, &h->chains, list) {
+		ret = iptcc_compile_chain_offsets(h, c, &offset, &num);
+		if (ret < 0)
+			return ret;
+	}
+
+	/* Append one error rule at end of chain */
+	num++;
+	offset += sizeof(STRUCT_ENTRY)
+		  + ALIGN(sizeof(struct ipt_error_target));
+
+	/* ruleset size is now in offset */
+	*size = offset;
+	return num;
+}
+
+static int iptcc_compile_table(TC_HANDLE_T h, STRUCT_REPLACE *repl)
+{
+	struct chain_head *c;
+	struct iptcb_chain_error *error;
+
+	/* Second pass: copy from cache to offsets, fill in jumps */
+	list_for_each_entry(c, &h->chains, list) {
+		int ret = iptcc_compile_chain(h, repl, c);
+		if (ret < 0)
+			return ret;
+	}
+
+	/* Append error rule at end of chain */
+	error = (void *)repl->entries + repl->size - IPTCB_CHAIN_ERROR_SIZE;
+	error->entry.target_offset = sizeof(STRUCT_ENTRY);
+	error->entry.next_offset = IPTCB_CHAIN_ERROR_SIZE;
+	error->target.t.u.user.target_size = 
+		ALIGN(sizeof(struct ipt_error_target));
+	strcpy((char *)&error->target.t.u.user.name, ERROR_TARGET);
+	strcpy((char *)&error->target.error, "ERROR");
+
+	return 1;
+}
+
+/**********************************************************************
+ * EXTERNAL API (operates on cache only)
+ **********************************************************************/
 
 /* Allocate handle of given size */
 static TC_HANDLE_T
@@ -220,33 +739,37 @@
 	size_t len;
 	TC_HANDLE_T h;
 
-	len = sizeof(STRUCT_TC_HANDLE)
-		+ size
-		+ num_rules * sizeof(struct counter_map);
+	len = sizeof(STRUCT_TC_HANDLE) + size;
 
-	if ((h = malloc(len)) == NULL) {
+	h = malloc(sizeof(STRUCT_TC_HANDLE));
+	if (!h) {
 		errno = ENOMEM;
 		return NULL;
 	}
-
-	h->changed = 0;
-	h->cache_num_chains = 0;
-	h->cache_chain_heads = NULL;
-	h->counter_map = (void *)h
-		+ sizeof(STRUCT_TC_HANDLE)
-		+ size;
+	memset(h, 0, sizeof(*h));
+	INIT_LIST_HEAD(&h->chains);
 	strcpy(h->info.name, tablename);
-	strcpy(h->entries.name, tablename);
+
+	h->entries = malloc(size);
+	if (!h->entries)
+		goto out_free_handle;
+
+	strcpy(h->entries->name, tablename);
 
 	return h;
+
+out_free_handle:
+	free(h);
+
+	return NULL;
 }
 
+
 TC_HANDLE_T
 TC_INIT(const char *tablename)
 {
 	TC_HANDLE_T h;
 	STRUCT_GETINFO info;
-	unsigned int i;
 	int tmp;
 	socklen_t s;
 
@@ -272,6 +795,9 @@
 	if (getsockopt(sockfd, TC_IPPROTO, SO_GET_INFO, &info, &s) < 0)
 		return NULL;
 
+	DEBUGP("valid_hooks=0x%08x, num_entries=%u, size=%u\n",
+		info.valid_hooks, info.num_entries, info.size);
+
 	if ((h = alloc_handle(info.name, info.size, info.num_entries))
 	    == NULL) {
 		close(sockfd);
@@ -279,54 +805,59 @@
 		return NULL;
 	}
 
-/* Too hard --RR */
-#if 0
-	sprintf(pathname, "%s/%s", IPT_LIB_DIR, info.name);
-	dynlib = dlopen(pathname, RTLD_NOW);
-	if (!dynlib) {
-		errno = ENOENT;
-		return NULL;
-	}
-	h->hooknames = dlsym(dynlib, "hooknames");
-	if (!h->hooknames) {
-		errno = ENOENT;
-		return NULL;
-	}
-#else
-	h->hooknames = hooknames;
-#endif
-
 	/* Initialize current state */
 	h->info = info;
-	h->new_number = h->info.num_entries;
-	for (i = 0; i < h->info.num_entries; i++)
-		h->counter_map[i]
-			= ((struct counter_map){COUNTER_MAP_NORMAL_MAP, i});
 
-	h->entries.size = h->info.size;
+	h->entries->size = h->info.size;
 
 	tmp = sizeof(STRUCT_GET_ENTRIES) + h->info.size;
 
-	if (getsockopt(sockfd, TC_IPPROTO, SO_GET_ENTRIES, &h->entries,
-		       &tmp) < 0) {
-		close(sockfd);
-		sockfd = -1;
-		free(h);
-		return NULL;
+	if (getsockopt(sockfd, TC_IPPROTO, SO_GET_ENTRIES, h->entries,
+		       &tmp) < 0)
+		goto error;
+
+#ifdef IPTC_DEBUG2
+	{
+		int fd = open("/tmp/libiptc-so_get_entries.blob", 
+				O_CREAT|O_WRONLY);
+		if (fd >= 0) {
+			write(fd, h->entries, tmp);
+			close(fd);
+		}
 	}
+#endif
+
+	if (parse_table(h) < 0)
+		goto error;
 
 	CHECK(h);
 	return h;
+error:
+	TC_FREE(&h);
+	return NULL;
 }
 
 void
 TC_FREE(TC_HANDLE_T *h)
 {
+	struct chain_head *c, *tmp;
+
 	close(sockfd);
 	sockfd = -1;
-	if ((*h)->cache_chain_heads)
-		free((*h)->cache_chain_heads);
+
+	list_for_each_entry_safe(c, tmp, &(*h)->chains, list) {
+		struct rule_head *r, *rtmp;
+
+		list_for_each_entry_safe(r, rtmp, &c->rules, list) {
+			free(r);
+		}
+
+		free(c);
+	}
+
+	free((*h)->entries);
 	free(*h);
+
 	*h = NULL;
 }
 
@@ -343,10 +874,10 @@
 TC_DUMP_ENTRIES(const TC_HANDLE_T handle)
 {
 	CHECK(handle);
-
+#if 0
 	printf("libiptc v%s.  %u entries, %u bytes.\n",
 	       IPTABLES_VERSION,
-	       handle->new_number, handle->entries.size);
+	       handle->new_number, handle->entries->size);
 	printf("Table `%s'\n", handle->info.name);
 	printf("Hooks: pre/in/fwd/out/post = %u/%u/%u/%u/%u\n",
 	       handle->info.hook_entry[HOOK_PRE_ROUTING],
@@ -361,206 +892,18 @@
 	       handle->info.underflow[HOOK_LOCAL_OUT],
 	       handle->info.underflow[HOOK_POST_ROUTING]);
 
-	ENTRY_ITERATE(handle->entries.entrytable, handle->entries.size,
+	ENTRY_ITERATE(handle->entries->entrytable, handle->entries->size,
 		      dump_entry, handle);
-}
-
-/* Returns 0 if not hook entry, else hooknumber + 1 */
-static inline unsigned int
-is_hook_entry(STRUCT_ENTRY *e, TC_HANDLE_T h)
-{
-	unsigned int i;
-
-	for (i = 0; i < NUMHOOKS; i++) {
-		if ((h->info.valid_hooks & (1 << i))
-		    && get_entry(h, h->info.hook_entry[i]) == e)
-			return i+1;
-	}
-	return 0;
-}
-
-static inline int
-add_chain(STRUCT_ENTRY *e, TC_HANDLE_T h, STRUCT_ENTRY **prev)
-{
-	unsigned int builtin;
-
-	/* Last entry.  End it. */
-	if (entry2offset(h, e) + e->next_offset == h->entries.size) {
-		/* This is the ERROR node at end of the table */
-		h->cache_chain_heads[h->cache_num_chains-1].end_off = 
-			entry2offset(h, *prev);
-		return 0;
-	}
-
-	/* We know this is the start of a new chain if it's an ERROR
-	   target, or a hook entry point */
-	if (strcmp(GET_TARGET(e)->u.user.name, ERROR_TARGET) == 0) {
-		/* prev was last entry in previous chain */
-		h->cache_chain_heads[h->cache_num_chains-1].end_off
-			= entry2offset(h, *prev);
-
-		strcpy(h->cache_chain_heads[h->cache_num_chains].name,
-		       (const char *)GET_TARGET(e)->data);
-		h->cache_chain_heads[h->cache_num_chains].start_off
-			= entry2offset(h, (void *)e + e->next_offset);
-		h->cache_num_chains++;
-	} else if ((builtin = is_hook_entry(e, h)) != 0) {
-		if (h->cache_num_chains > 0)
-			/* prev was last entry in previous chain */
-			h->cache_chain_heads[h->cache_num_chains-1].end_off
-				= entry2offset(h, *prev);
-
-		strcpy(h->cache_chain_heads[h->cache_num_chains].name,
-		       h->hooknames[builtin-1]);
-		h->cache_chain_heads[h->cache_num_chains].start_off
-			= entry2offset(h, (void *)e);
-		h->cache_num_chains++;
-	}
-
-	*prev = e;
-	return 0;
-}
-
-static int alphasort(const void *a, const void *b)
-{
-	return strcmp(((struct chain_cache *)a)->name,
-		      ((struct chain_cache *)b)->name);
-}
-
-static int populate_cache(TC_HANDLE_T h)
-{
-	unsigned int i;
-	STRUCT_ENTRY *prev;
-
-	/* # chains < # rules / 2 + num builtins - 1 */
-	h->cache_chain_heads = malloc((h->new_number / 2 + 4)
-				      * sizeof(struct chain_cache));
-	if (!h->cache_chain_heads) {
-		errno = ENOMEM;
-		return 0;
-	}
-
-	h->cache_num_chains = 0;
-	h->cache_num_builtins = 0;
-
-	/* Count builtins */
-	for (i = 0; i < NUMHOOKS; i++) {
-		if (h->info.valid_hooks & (1 << i))
-			h->cache_num_builtins++;
-	}
-
-	prev = NULL;
-	ENTRY_ITERATE(h->entries.entrytable, h->entries.size,
-		      add_chain, h, &prev);
-
-	qsort(h->cache_chain_heads + h->cache_num_builtins,
-	      h->cache_num_chains - h->cache_num_builtins,
-	      sizeof(struct chain_cache), alphasort);
-
-	return 1;
-}
-
-static int 
-correct_cache(TC_HANDLE_T h, unsigned int offset, int delta)
-{
-	int i;		/* needs to be signed because deleting first
-			   chain can make it drop to -1 */
-
-	if (!delta)
-		return 1;
-
-	for (i = 0; i < h->cache_num_chains; i++) {
-		struct chain_cache *cc = &h->cache_chain_heads[i];
-
-		if (delta < 0) {
-			/* take care about deleted chains */
-			if (cc->start_off > offset+delta
-			    && cc->end_off < offset) {
-				/* this chain is within the deleted range,
-				 * let's remove it from the cache */
-				void *start;
-				unsigned int size;
-
-				h->cache_num_chains--;
-
-				/* no need for memmove since we are 
-				 * removing the last entry */
-				if (i >= h->cache_num_chains)
-					continue;
-
-				start = &h->cache_chain_heads[i+1];
-				size = (h->cache_num_chains-i)
-					* sizeof(struct chain_cache);
-				memmove(cc, start, size);
-
-				/* iterate over same index again, since
-				 * it is now a different chain */
-				i--;
-				continue;
-			}
-		}
-
-		if (cc->start_off > offset)
-			cc->start_off += delta;
-
-		if (cc->end_off >= offset)
-			cc->end_off += delta;
-	}
-	/* HW_FIXME: sorting might be needed, but just in case a new chain was
-	 * added */
-
-	return 1;
-}
-
-static int
-add_chain_cache(TC_HANDLE_T h, const char *name, unsigned int start_off,
-		unsigned int end_off)
-{
-	struct chain_cache *ccs = realloc(h->cache_chain_heads, 
-					  (h->new_number / 2 + 4 + 1)
-					   * sizeof(struct chain_cache));
-	struct chain_cache *newcc;
-	
-	if (!ccs)
-		return 0;
-
-	h->cache_chain_heads = ccs;
-	newcc = &h->cache_chain_heads[h->cache_num_chains];
-	h->cache_num_chains++;
-
-	strncpy(newcc->name, name, TABLE_MAXNAMELEN-1);
-	newcc->name[TABLE_MAXNAMELEN-1] = '\0';
-	newcc->start_off = start_off;
-	newcc->end_off = end_off;
-
-	return 1;
-}
-
-/* Returns cache ptr if found, otherwise NULL. */
-static struct chain_cache *
-find_label(const char *name, TC_HANDLE_T handle)
-{
-	unsigned int i;
-
-	if (handle->cache_chain_heads == NULL
-	    && !populate_cache(handle))
-		return NULL;
-
-	/* FIXME: Linear search through builtins, then binary --RR */
-	for (i = 0; i < handle->cache_num_chains; i++) {
-		if (strcmp(handle->cache_chain_heads[i].name, name) == 0)
-			return &handle->cache_chain_heads[i];
-	}
-
-	return NULL;
+#endif
 }
 
 /* Does this chain exist? */
 int TC_IS_CHAIN(const char *chain, const TC_HANDLE_T handle)
 {
-	return find_label(chain, handle) != NULL;
+	return iptcc_find_label(chain, handle) != NULL;
 }
 
+#if 0
 /* Returns the position of the final (ie. unconditional) element. */
 static unsigned int
 get_chain_end(const TC_HANDLE_T handle, unsigned int start)
@@ -597,166 +940,234 @@
 		handle->entries.size, off);
 	abort();
 }
+#endif
+
+static void iptcc_chain_iterator_advance(TC_HANDLE_T handle)
+{
+	struct chain_head *c = handle->chain_iterator_cur;
+
+	if (c->list.next == &handle->chains)
+		handle->chain_iterator_cur = NULL;
+	else
+		handle->chain_iterator_cur = 
+			list_entry(c->list.next, struct chain_head, list);
+}
 
 /* Iterator functions to run through the chains. */
 const char *
 TC_FIRST_CHAIN(TC_HANDLE_T *handle)
 {
-	if ((*handle)->cache_chain_heads == NULL
-	    && !populate_cache(*handle))
+	struct chain_head *c = list_entry((*handle)->chains.next,
+					  struct chain_head, list);
+
+	iptc_fn = TC_FIRST_CHAIN;
+
+
+	if (list_empty(&(*handle)->chains)) {
+		DEBUGP(": no chains\n");
 		return NULL;
+	}
 
-	(*handle)->cache_chain_iteration
-		= &(*handle)->cache_chain_heads[0];
+	(*handle)->chain_iterator_cur = c;
+	iptcc_chain_iterator_advance(*handle);
 
-	return (*handle)->cache_chain_iteration->name;
+	DEBUGP(": returning `%s'\n", c->name);
+	return c->name;
 }
 
 /* Iterator functions to run through the chains.  Returns NULL at end. */
 const char *
 TC_NEXT_CHAIN(TC_HANDLE_T *handle)
 {
-	(*handle)->cache_chain_iteration++;
+	struct chain_head *c = (*handle)->chain_iterator_cur;
 
-	if ((*handle)->cache_chain_iteration - (*handle)->cache_chain_heads
-	    == (*handle)->cache_num_chains)
+	iptc_fn = TC_NEXT_CHAIN;
+
+	if (!c) {
+		DEBUGP(": no more chains\n");
 		return NULL;
+	}
 
-	return (*handle)->cache_chain_iteration->name;
+	iptcc_chain_iterator_advance(*handle);
+	
+	DEBUGP(": returning `%s'\n", c->name);
+	return c->name;
 }
 
 /* Get first rule in the given chain: NULL for empty chain. */
 const STRUCT_ENTRY *
 TC_FIRST_RULE(const char *chain, TC_HANDLE_T *handle)
 {
-	struct chain_cache *c;
+	struct chain_head *c;
+	struct rule_head *r;
 
-	c = find_label(chain, *handle);
+	iptc_fn = TC_FIRST_RULE;
+
+	DEBUGP("first rule(%s): ", chain);
+
+	c = iptcc_find_label(chain, *handle);
 	if (!c) {
 		errno = ENOENT;
 		return NULL;
 	}
 
 	/* Empty chain: single return/policy rule */
-	if (c->start_off == c->end_off)
+	if (list_empty(&c->rules)) {
+		DEBUGP_C("no rules, returning NULL\n");
 		return NULL;
+	}
 
-	(*handle)->cache_rule_end = offset2entry(*handle, c->end_off);
-	return offset2entry(*handle, c->start_off);
+	r = list_entry(c->rules.next, struct rule_head, list);
+	(*handle)->rule_iterator_cur = r;
+	DEBUGP_C("%p\n", r);
+
+	return r->entry;
 }
 
 /* Returns NULL when rules run out. */
 const STRUCT_ENTRY *
 TC_NEXT_RULE(const STRUCT_ENTRY *prev, TC_HANDLE_T *handle)
 {
-	if ((void *)prev + prev->next_offset
-	    == (void *)(*handle)->cache_rule_end)
-		return NULL;
+	struct rule_head *r;
 
-	return (void *)prev + prev->next_offset;
+	DEBUGP("rule_iterator_cur=%p...", (*handle)->rule_iterator_cur);
+
+	if (!(*handle)->rule_iterator_cur) {
+		DEBUGP_C("returning NULL\n");
+		return NULL;
+	}
+	
+	r = list_entry((*handle)->rule_iterator_cur->list.next, 
+			struct rule_head, list);
+
+	iptc_fn = TC_NEXT_RULE;
+
+	DEBUGP_C("next=%p, head=%p...", &r->list, 
+		&(*handle)->rule_iterator_cur->chain->rules);
+
+	if (&r->list == &(*handle)->rule_iterator_cur->chain->rules) {
+		(*handle)->rule_iterator_cur = NULL;
+		DEBUGP_C("finished, returning NULL\n");
+		return NULL;
+	}
+
+	(*handle)->rule_iterator_cur = r;
+
+	/* NOTE: prev is without any influence ! */
+	DEBUGP_C("returning rule %p\n", r);
+	return r->entry;
 }
 
-#if 0
 /* How many rules in this chain? */
 unsigned int
 TC_NUM_RULES(const char *chain, TC_HANDLE_T *handle)
 {
-	unsigned int off = 0;
-	STRUCT_ENTRY *start, *end;
-
+	struct chain_head *c;
+	iptc_fn = TC_NUM_RULES;
 	CHECK(*handle);
-	if (!find_label(&off, chain, *handle)) {
+
+	c = iptcc_find_label(chain, *handle);
+	if (!c) {
 		errno = ENOENT;
 		return (unsigned int)-1;
 	}
-
-	start = get_entry(*handle, off);
-	end = get_entry(*handle, get_chain_end(*handle, off));
-
-	return entry2index(*handle, end) - entry2index(*handle, start);
+	
+	return c->num_rules;
 }
 
-/* Get n'th rule in this chain. */
 const STRUCT_ENTRY *TC_GET_RULE(const char *chain,
 				unsigned int n,
 				TC_HANDLE_T *handle)
 {
-	unsigned int pos = 0, chainindex;
+	struct chain_head *c;
+	struct rule_head *r;
+	
+	iptc_fn = TC_GET_RULE;
 
 	CHECK(*handle);
-	if (!find_label(&pos, chain, *handle)) {
+
+	c = iptcc_find_label(chain, *handle);
+	if (!c) {
 		errno = ENOENT;
 		return NULL;
 	}
 
-	chainindex = entry2index(*handle, get_entry(*handle, pos));
-
-	return index2entry(*handle, chainindex + n);
-}
-#endif
-
-static const char *
-target_name(TC_HANDLE_T handle, const STRUCT_ENTRY *ce)
-{
-	int spos;
-	unsigned int labelidx;
-	STRUCT_ENTRY *jumpto;
-
-	/* To avoid const warnings */
-	STRUCT_ENTRY *e = (STRUCT_ENTRY *)ce;
-
-	if (strcmp(GET_TARGET(e)->u.user.name, STANDARD_TARGET) != 0)
-		return GET_TARGET(e)->u.user.name;
-
-	/* Standard target: evaluate */
-	spos = *(int *)GET_TARGET(e)->data;
-	if (spos < 0) {
-		if (spos == RETURN)
-			return LABEL_RETURN;
-		else if (spos == -NF_ACCEPT-1)
-			return LABEL_ACCEPT;
-		else if (spos == -NF_DROP-1)
-			return LABEL_DROP;
-		else if (spos == -NF_QUEUE-1)
-			return LABEL_QUEUE;
-
-		fprintf(stderr, "ERROR: off %lu/%u not a valid target (%i)\n",
-			entry2offset(handle, e), handle->entries.size,
-			spos);
-		abort();
-	}
-
-	jumpto = get_entry(handle, spos);
-
-	/* Fall through rule */
-	if (jumpto == (void *)e + e->next_offset)
-		return "";
-
-	/* Must point to head of a chain: ie. after error rule */
-	labelidx = entry2index(handle, jumpto) - 1;
-	return get_errorlabel(handle, index2offset(handle, labelidx));
+	r = iptcc_get_rule_num(c, n);
+	if (!r)
+		return NULL;
+	return r->entry;
 }
 
 /* Returns a pointer to the target name of this position. */
-const char *TC_GET_TARGET(const STRUCT_ENTRY *e,
-			  TC_HANDLE_T *handle)
+const char *standard_target_map(int verdict)
 {
-	return target_name(*handle, e);
+	switch (verdict) {
+		case RETURN:
+			return LABEL_RETURN;
+			break;
+		case -NF_ACCEPT-1:
+			return LABEL_ACCEPT;
+			break;
+		case -NF_DROP-1:
+			return LABEL_DROP;
+			break;
+		case -NF_QUEUE-1:
+			return LABEL_QUEUE;
+			break;
+		default:
+			fprintf(stderr, "ERROR: %d not a valid target)\n",
+				verdict);
+			abort();
+			break;
+	}
+	/* not reached */
+	return NULL;
 }
 
+/* Returns a pointer to the target name of this position. */
+const char *TC_GET_TARGET(const STRUCT_ENTRY *ce,
+			  TC_HANDLE_T *handle)
+{
+	STRUCT_ENTRY *e = (STRUCT_ENTRY *)ce;
+	struct rule_head *r = container_of(e, struct rule_head, entry);
+
+	iptc_fn = TC_GET_TARGET;
+
+	switch(r->type) {
+		int spos;
+		case IPTCC_R_FALLTHROUGH:
+			return "";
+			break;
+		case IPTCC_R_JUMP:
+			DEBUGP("r=%p, jump=%p, name=`%s'\n", r, r->jump, r->jump->name);
+			return r->jump->name;
+			break;
+		case IPTCC_R_STANDARD:
+			spos = *(int *)GET_TARGET(e)->data;
+			DEBUGP("r=%p, spos=%d'\n", r, spos);
+			return standard_target_map(spos);
+			break;
+		case IPTCC_R_MODULE:
+			return GET_TARGET(e)->u.user.name;
+			break;
+	}
+	return NULL;
+}
 /* Is this a built-in chain?  Actually returns hook + 1. */
 int
 TC_BUILTIN(const char *chain, const TC_HANDLE_T handle)
 {
-	unsigned int i;
+	struct chain_head *c;
+	
+	iptc_fn = TC_BUILTIN;
 
-	for (i = 0; i < NUMHOOKS; i++) {
-		if ((handle->info.valid_hooks & (1 << i))
-		    && handle->hooknames[i]
-		    && strcmp(handle->hooknames[i], chain) == 0)
-			return i+1;
+	c = iptcc_find_label(chain, handle);
+	if (!c) {
+		errno = ENOENT;
+		return 0;
 	}
-	return 0;
+
+	return iptcc_is_builtin(c);
 }
 
 /* Get the policy of a given built-in chain */
@@ -765,201 +1176,30 @@
 	      STRUCT_COUNTERS *counters,
 	      TC_HANDLE_T *handle)
 {
-	unsigned int start;
-	STRUCT_ENTRY *e;
-	int hook;
+	struct chain_head *c;
 
-	hook = TC_BUILTIN(chain, *handle);
-	if (hook != 0)
-		start = (*handle)->info.hook_entry[hook-1];
-	else
+	iptc_fn = TC_GET_POLICY;
+
+	DEBUGP("called for chain %s\n", chain);
+
+	c = iptcc_find_label(chain, *handle);
+	if (!c) {
+		errno = ENOENT;
+		return NULL;
+	}
+
+	if (!iptcc_is_builtin(c))
 		return NULL;
 
-	e = get_entry(*handle, get_chain_end(*handle, start));
-	*counters = e->counters;
+	*counters = c->counters;
 
-	return target_name(*handle, e);
-}
-
-static inline int
-correct_verdict(STRUCT_ENTRY *e,
-		char *base,
-		unsigned int offset, int delta_offset)
-{
-	STRUCT_STANDARD_TARGET *t = (void *)GET_TARGET(e);
-	unsigned int curr = (char *)e - base;
-
-	/* Trap: insert of fall-through rule.  Don't change fall-through
-	   verdict to jump-over-next-rule. */
-	if (strcmp(t->target.u.user.name, STANDARD_TARGET) == 0
-	    && t->verdict > (int)offset
-	    && !(curr == offset &&
-		 t->verdict == curr + e->next_offset)) {
-		t->verdict += delta_offset;
-	}
-
-	return 0;
-}
-
-/* Adjusts standard verdict jump positions after an insertion/deletion. */
-static int
-set_verdict(unsigned int offset, int delta_offset, TC_HANDLE_T *handle)
-{
-	ENTRY_ITERATE((*handle)->entries.entrytable,
-		      (*handle)->entries.size,
-		      correct_verdict, (char *)(*handle)->entries.entrytable,
-		      offset, delta_offset);
-
-	set_changed(*handle);
-	return 1;
-}
-
-/* If prepend is set, then we are prepending to a chain: if the
- * insertion position is an entry point, keep the entry point. */
-static int
-insert_rules(unsigned int num_rules, unsigned int rules_size,
-	     const STRUCT_ENTRY *insert,
-	     unsigned int offset, unsigned int num_rules_offset,
-	     int prepend,
-	     TC_HANDLE_T *handle)
-{
-	TC_HANDLE_T newh;
-	STRUCT_GETINFO newinfo;
-	unsigned int i;
-
-	if (offset >= (*handle)->entries.size) {
-		errno = EINVAL;
-		return 0;
-	}
-
-	newinfo = (*handle)->info;
-
-	/* Fix up entry points. */
-	for (i = 0; i < NUMHOOKS; i++) {
-		/* Entry points to START of chain, so keep same if
-                   inserting on at that point. */
-		if ((*handle)->info.hook_entry[i] > offset)
-			newinfo.hook_entry[i] += rules_size;
-
-		/* Underflow always points to END of chain (policy),
-		   so if something is inserted at same point, it
-		   should be advanced. */
-		if ((*handle)->info.underflow[i] >= offset)
-			newinfo.underflow[i] += rules_size;
-	}
-
-	newh = alloc_handle((*handle)->info.name,
-			    (*handle)->entries.size + rules_size,
-			    (*handle)->new_number + num_rules);
-	if (!newh)
-		return 0;
-	newh->info = newinfo;
-
-	/* Copy pre... */
-	memcpy(newh->entries.entrytable, (*handle)->entries.entrytable,offset);
-	/* ... Insert new ... */
-	memcpy((char *)newh->entries.entrytable + offset, insert, rules_size);
-	/* ... copy post */
-	memcpy((char *)newh->entries.entrytable + offset + rules_size,
-	       (char *)(*handle)->entries.entrytable + offset,
-	       (*handle)->entries.size - offset);
-
-	/* Move counter map. */
-	/* Copy pre... */
-	memcpy(newh->counter_map, (*handle)->counter_map,
-	       sizeof(struct counter_map) * num_rules_offset);
-	/* ... copy post */
-	memcpy(newh->counter_map + num_rules_offset + num_rules,
-	       (*handle)->counter_map + num_rules_offset,
-	       sizeof(struct counter_map) * ((*handle)->new_number
-					     - num_rules_offset));
-	/* Set intermediates to no counter copy */
-	for (i = 0; i < num_rules; i++)
-		newh->counter_map[num_rules_offset+i]
-			= ((struct counter_map){ COUNTER_MAP_SET, 0 });
-
-	newh->new_number = (*handle)->new_number + num_rules;
-	newh->entries.size = (*handle)->entries.size + rules_size;
-	newh->hooknames = (*handle)->hooknames;
-
-	newh->cache_chain_heads = (*handle)->cache_chain_heads;
-	newh->cache_num_builtins = (*handle)->cache_num_builtins;
-	newh->cache_num_chains = (*handle)->cache_num_chains;
-	newh->cache_rule_end = (*handle)->cache_rule_end;
-	newh->cache_chain_iteration = (*handle)->cache_chain_iteration;
-	if (!correct_cache(newh, offset, rules_size)) {
-		free(newh);
-		return 0;
-	}
-
-	free(*handle);
-	*handle = newh;
-
-	return set_verdict(offset, rules_size, handle);
+	return standard_target_map(c->verdict);
 }
 
 static int
-delete_rules(unsigned int num_rules, unsigned int rules_size,
-	     unsigned int offset, unsigned int num_rules_offset,
-	     TC_HANDLE_T *handle)
+iptcc_standard_map(struct rule_head *r, int verdict)
 {
-	unsigned int i;
-
-	if (offset + rules_size > (*handle)->entries.size) {
-		errno = EINVAL;
-		return 0;
-	}
-
-	/* Fix up entry points. */
-	for (i = 0; i < NUMHOOKS; i++) {
-		/* In practice, we never delete up to a hook entry,
-		   since the built-in chains are always first,
-		   so these two are never equal */
-		if ((*handle)->info.hook_entry[i] >= offset + rules_size)
-			(*handle)->info.hook_entry[i] -= rules_size;
-		else if ((*handle)->info.hook_entry[i] > offset) {
-			fprintf(stderr, "ERROR: Deleting entry %u %u %u\n",
-				i, (*handle)->info.hook_entry[i], offset);
-			abort();
-		}
-
-		/* Underflow points to policy (terminal) rule in
-                   built-in, so sequality is valid here (when deleting
-                   the last rule). */
-		if ((*handle)->info.underflow[i] >= offset + rules_size)
-			(*handle)->info.underflow[i] -= rules_size;
-		else if ((*handle)->info.underflow[i] > offset) {
-			fprintf(stderr, "ERROR: Deleting uflow %u %u %u\n",
-				i, (*handle)->info.underflow[i], offset);
-			abort();
-		}
-	}
-
-	/* Move the rules down. */
-	memmove((char *)(*handle)->entries.entrytable + offset,
-		(char *)(*handle)->entries.entrytable + offset + rules_size,
-		(*handle)->entries.size - (offset + rules_size));
-
-	/* Move the counter map down. */
-	memmove(&(*handle)->counter_map[num_rules_offset],
-		&(*handle)->counter_map[num_rules_offset + num_rules],
-		sizeof(struct counter_map)
-		* ((*handle)->new_number - (num_rules + num_rules_offset)));
-
-	/* Fix numbers */
-	(*handle)->new_number -= num_rules;
-	(*handle)->entries.size -= rules_size;
-
-	/* Fix the chain cache */
-	if (!correct_cache(*handle, offset+rules_size, -(int)rules_size))
-		return 0;
-
-	return set_verdict(offset, -(int)rules_size, handle);
-}
-
-static int
-standard_map(STRUCT_ENTRY *e, int verdict)
-{
+	STRUCT_ENTRY *e = r->entry;
 	STRUCT_STANDARD_TARGET *t;
 
 	t = (STRUCT_STANDARD_TARGET *)GET_TARGET(e);
@@ -974,44 +1214,50 @@
 	strcpy(t->target.u.user.name, STANDARD_TARGET);
 	t->verdict = verdict;
 
+	r->type = IPTCC_R_STANDARD;
+
 	return 1;
 }
 
 static int
-map_target(const TC_HANDLE_T handle,
-	   STRUCT_ENTRY *e,
-	   unsigned int offset,
-	   STRUCT_ENTRY_TARGET *old)
+iptcc_map_target(const TC_HANDLE_T handle,
+	   struct rule_head *r)
 {
+	STRUCT_ENTRY *e = r->entry;
 	STRUCT_ENTRY_TARGET *t = GET_TARGET(e);
 
-	/* Save old target (except data, which we don't change, except for
-	   standard case, where we don't care). */
-	*old = *t;
-
 	/* Maybe it's empty (=> fall through) */
-	if (strcmp(t->u.user.name, "") == 0)
-		return standard_map(e, offset + e->next_offset);
+	if (strcmp(t->u.user.name, "") == 0) {
+		r->type = IPTCC_R_FALLTHROUGH;
+		return 1;
+	}
 	/* Maybe it's a standard target name... */
 	else if (strcmp(t->u.user.name, LABEL_ACCEPT) == 0)
-		return standard_map(e, -NF_ACCEPT - 1);
+		return iptcc_standard_map(r, -NF_ACCEPT - 1);
 	else if (strcmp(t->u.user.name, LABEL_DROP) == 0)
-		return standard_map(e, -NF_DROP - 1);
+		return iptcc_standard_map(r, -NF_DROP - 1);
 	else if (strcmp(t->u.user.name, LABEL_QUEUE) == 0)
-		return standard_map(e, -NF_QUEUE - 1);
+		return iptcc_standard_map(r, -NF_QUEUE - 1);
 	else if (strcmp(t->u.user.name, LABEL_RETURN) == 0)
-		return standard_map(e, RETURN);
+		return iptcc_standard_map(r, RETURN);
 	else if (TC_BUILTIN(t->u.user.name, handle)) {
 		/* Can't jump to builtins. */
 		errno = EINVAL;
 		return 0;
 	} else {
 		/* Maybe it's an existing chain name. */
-		struct chain_cache *c;
+		struct chain_head *c;
+		DEBUGP("trying to find chain `%s': ", t->u.user.name);
 
-		c = find_label(t->u.user.name, handle);
-		if (c)
-			return standard_map(e, c->start_off);
+		c = iptcc_find_label(t->u.user.name, handle);
+		if (c) {
+			DEBUGP_C("found!\n");
+			r->type = IPTCC_R_JUMP;
+			r->jump = c;
+			c->references++;
+			return 1;
+		}
+		DEBUGP_C("not found :(\n");
 	}
 
 	/* Must be a module?  If not, kernel will reject... */
@@ -1019,19 +1265,11 @@
 	memset(t->u.user.name + strlen(t->u.user.name),
 	       0,
 	       FUNCTION_MAXNAMELEN - strlen(t->u.user.name));
+
+	set_changed(handle);
 	return 1;
 }
 
-static void
-unmap_target(STRUCT_ENTRY *e, STRUCT_ENTRY_TARGET *old)
-{
-	STRUCT_ENTRY_TARGET *t = GET_TARGET(e);
-
-	/* Save old target (except data, which we don't change, except for
-	   standard case, where we don't care). */
-	*t = *old;
-}
-
 /* Insert the entry `fw' in chain `chain' into position `rulenum'. */
 int
 TC_INSERT_ENTRY(const IPT_CHAINLABEL chain,
@@ -1039,36 +1277,41 @@
 		unsigned int rulenum,
 		TC_HANDLE_T *handle)
 {
-	unsigned int chainindex, offset;
-	STRUCT_ENTRY_TARGET old;
-	struct chain_cache *c;
-	STRUCT_ENTRY *tmp;
-	int ret;
+	struct chain_head *c;
+	struct rule_head *r, *prev;
 
 	iptc_fn = TC_INSERT_ENTRY;
-	if (!(c = find_label(chain, *handle))) {
+
+	if (!(c = iptcc_find_label(chain, *handle))) {
 		errno = ENOENT;
 		return 0;
 	}
 
-	chainindex = offset2index(*handle, c->start_off);
-
-	tmp = index2entry(*handle, chainindex + rulenum);
-	if (!tmp || tmp > offset2entry(*handle, c->end_off)) {
+	prev = iptcc_get_rule_num(c, rulenum);
+	if (!prev) {
 		errno = E2BIG;
 		return 0;
 	}
-	offset = index2offset(*handle, chainindex + rulenum);
 
-	/* Mapping target actually alters entry, but that's
-           transparent to the caller. */
-	if (!map_target(*handle, (STRUCT_ENTRY *)e, offset, &old))
+	if (!(r = iptcc_alloc_rule(c, e->next_offset))) {
+		errno = ENOMEM;
 		return 0;
+	}
 
-	ret = insert_rules(1, e->next_offset, e, offset,
-			   chainindex + rulenum, rulenum == 0, handle);
-	unmap_target((STRUCT_ENTRY *)e, &old);
-	return ret;
+	memcpy(r->entry, e, e->next_offset);
+	r->counter_map.maptype = COUNTER_MAP_SET;
+
+	if (!iptcc_map_target(*handle, r)) {
+		free(r);
+		return 0;
+	}
+
+	list_add_tail(&r->list, &prev->list);
+	c->num_rules++;
+
+	set_changed(*handle);
+
+	return 1;
 }
 
 /* Atomically replace rule `rulenum' in `chain' with `fw'. */
@@ -1078,40 +1321,40 @@
 		 unsigned int rulenum,
 		 TC_HANDLE_T *handle)
 {
-	unsigned int chainindex, offset;
-	STRUCT_ENTRY_TARGET old;
-	struct chain_cache *c;
-	STRUCT_ENTRY *tmp;
-	int ret;
+	struct chain_head *c;
+	struct rule_head *r, *old;
 
 	iptc_fn = TC_REPLACE_ENTRY;
 
-	if (!(c = find_label(chain, *handle))) {
+	if (!(c = iptcc_find_label(chain, *handle))) {
 		errno = ENOENT;
 		return 0;
 	}
 
-	chainindex = offset2index(*handle, c->start_off);
-
-	tmp = index2entry(*handle, chainindex + rulenum);
-	if (!tmp || tmp >= offset2entry(*handle, c->end_off)) {
+	if (!(old = iptcc_get_rule_num(c, rulenum))) {
 		errno = E2BIG;
 		return 0;
 	}
 
-	offset = index2offset(*handle, chainindex + rulenum);
-	/* Replace = delete and insert. */
-	if (!delete_rules(1, get_entry(*handle, offset)->next_offset,
-			  offset, chainindex + rulenum, handle))
+	if (!(r = iptcc_alloc_rule(c, e->next_offset))) {
+		errno = ENOMEM;
 		return 0;
+	}
 
-	if (!map_target(*handle, (STRUCT_ENTRY *)e, offset, &old))
+	memcpy(r->entry, e, e->next_offset);
+	r->counter_map.maptype = COUNTER_MAP_SET;
+
+	if (!iptcc_map_target(*handle, r)) {
+		free(r);
 		return 0;
+	}
 
-	ret = insert_rules(1, e->next_offset, e, offset,
-			   chainindex + rulenum, 1, handle);
-	unmap_target((STRUCT_ENTRY *)e, &old);
-	return ret;
+	list_add(&r->list, &old->list);
+	iptcc_delete_rule(old);
+
+	set_changed(*handle);
+
+	return 1;
 }
 
 /* Append entry `fw' to chain `chain'.  Equivalent to insert with
@@ -1121,24 +1364,37 @@
 		const STRUCT_ENTRY *e,
 		TC_HANDLE_T *handle)
 {
-	struct chain_cache *c;
-	STRUCT_ENTRY_TARGET old;
-	int ret;
+	struct chain_head *c;
+	struct rule_head *r;
 
 	iptc_fn = TC_APPEND_ENTRY;
-	if (!(c = find_label(chain, *handle))) {
+	if (!(c = iptcc_find_label(chain, *handle))) {
+		DEBUGP("unable to find chain `%s'\n", chain);
 		errno = ENOENT;
 		return 0;
 	}
 
-	if (!map_target(*handle, (STRUCT_ENTRY *)e,
-			c->end_off, &old))
+	if (!(r = iptcc_alloc_rule(c, e->next_offset))) {
+		DEBUGP("unable to allocate rule for chain `%s'\n", chain);
+		errno = ENOMEM;
 		return 0;
+	}
 
-	ret = insert_rules(1, e->next_offset, e, c->end_off, 
-			   offset2index(*handle, c->end_off), 0, handle);
-	unmap_target((STRUCT_ENTRY *)e, &old);
-	return ret;
+	memcpy(r->entry, e, e->next_offset);
+	r->counter_map.maptype = COUNTER_MAP_SET;
+
+	if (!iptcc_map_target(*handle, r)) {
+		DEBUGP("unable to ma target of rule for chain `%s'\n", chain);
+		free(r);
+		return 0;
+	}
+
+	list_add_tail(&r->list, &c->rules);
+	c->num_rules++;
+
+	set_changed(*handle);
+
+	return 1;
 }
 
 static inline int
@@ -1181,6 +1437,7 @@
 
 	return 0;
 }
+#if 0
 
 static int
 is_same(const STRUCT_ENTRY *a,
@@ -1195,11 +1452,11 @@
 		TC_HANDLE_T *handle)
 {
 	unsigned int offset;
-	struct chain_cache *c;
+	struct chain_head *c;
 	STRUCT_ENTRY *e, *fw;
 
 	iptc_fn = TC_DELETE_ENTRY;
-	if (!(c = find_label(chain, *handle))) {
+	if (!(c = iptcc_find_label(chain, *handle))) {
 		errno = ENOENT;
 		return 0;
 	}
@@ -1241,6 +1498,18 @@
 	errno = ENOENT;
 	return 0;
 }
+#endif
+
+/* Delete the first rule in `chain' which matches `fw'. */
+int
+TC_DELETE_ENTRY(const IPT_CHAINLABEL chain,
+		const STRUCT_ENTRY *origfw,
+		unsigned char *matchmask,
+		TC_HANDLE_T *handle)
+{
+	errno = ENOSYS;
+	return 0;
+}
 
 /* Delete the rule in position `rulenum' in `chain'. */
 int
@@ -1248,33 +1517,34 @@
 		    unsigned int rulenum,
 		    TC_HANDLE_T *handle)
 {
-	unsigned int index;
-	int ret;
-	STRUCT_ENTRY *e;
-	struct chain_cache *c;
+	struct chain_head *c;
+	struct rule_head *r;
 
 	iptc_fn = TC_DELETE_NUM_ENTRY;
-	if (!(c = find_label(chain, *handle))) {
+
+	if (!(c = iptcc_find_label(chain, *handle))) {
 		errno = ENOENT;
 		return 0;
 	}
 
-	index = offset2index(*handle, c->start_off) + rulenum;
-
-	if (index >= offset2index(*handle, c->end_off)) {
+	if (!(r = iptcc_get_rule_num(c, rulenum))) {
 		errno = E2BIG;
 		return 0;
 	}
 
-	e = index2entry(*handle, index);
-	if (e == NULL) {
-		errno = EINVAL;
-		return 0;
+	/* If we are about to delete the rule that is the current
+	 * iterator, move rule iterator back.  next pointer will then
+	 * point to real next node */
+	if (r == (*handle)->rule_iterator_cur) {
+		(*handle)->rule_iterator_cur = 
+			list_entry((*handle)->rule_iterator_cur->list.prev,
+				   struct rule_head, list);
 	}
 
-	ret = delete_rules(1, e->next_offset, entry2offset(*handle, e),
-			   index, handle);
-	return ret;
+	c->num_rules--;
+	iptcc_delete_rule(r);
+
+	return 1;
 }
 
 /* Check the packet `fw' on chain `chain'.  Returns the verdict, or
@@ -1292,47 +1562,43 @@
 int
 TC_FLUSH_ENTRIES(const IPT_CHAINLABEL chain, TC_HANDLE_T *handle)
 {
-	unsigned int startindex, endindex;
-	STRUCT_ENTRY *startentry, *endentry;
-	struct chain_cache *c;
-	int ret;
+	struct chain_head *c;
+	struct rule_head *r, *tmp;
 
 	iptc_fn = TC_FLUSH_ENTRIES;
-	if (!(c = find_label(chain, *handle))) {
+	if (!(c = iptcc_find_label(chain, *handle))) {
 		errno = ENOENT;
 		return 0;
 	}
-	startindex = offset2index(*handle, c->start_off);
-	endindex = offset2index(*handle, c->end_off);
-	startentry = offset2entry(*handle, c->start_off);
-	endentry = offset2entry(*handle, c->end_off);
 
-	ret = delete_rules(endindex - startindex,
-			   (char *)endentry - (char *)startentry,
-			   c->start_off, startindex,
-			   handle);
-	return ret;
+	list_for_each_entry_safe(r, tmp, &c->rules, list) {
+		iptcc_delete_rule(r);
+	}
+
+	c->num_rules = 0;
+
+	set_changed(*handle);
+
+	return 1;
 }
 
 /* Zeroes the counters in a chain. */
 int
 TC_ZERO_ENTRIES(const IPT_CHAINLABEL chain, TC_HANDLE_T *handle)
 {
-	unsigned int i, end;
-	struct chain_cache *c;
+	struct chain_head *c;
+	struct rule_head *r;
 
-	if (!(c = find_label(chain, *handle))) {
+	if (!(c = iptcc_find_label(chain, *handle))) {
 		errno = ENOENT;
 		return 0;
 	}
 
-	i = offset2index(*handle, c->start_off);
-	end = offset2index(*handle, c->end_off);
-
-	for (; i <= end; i++) {
-		if ((*handle)->counter_map[i].maptype ==COUNTER_MAP_NORMAL_MAP)
-			(*handle)->counter_map[i].maptype = COUNTER_MAP_ZEROED;
+	list_for_each_entry(r, &c->rules, list) {
+		if (r->counter_map.maptype == COUNTER_MAP_NORMAL_MAP)
+			r->counter_map.maptype = COUNTER_MAP_ZEROED;
 	}
+
 	set_changed(*handle);
 
 	return 1;
@@ -1343,29 +1609,23 @@
 		unsigned int rulenum,
 		TC_HANDLE_T *handle)
 {
-	STRUCT_ENTRY *e;
-	struct chain_cache *c;
-	unsigned int chainindex, end;
+	struct chain_head *c;
+	struct rule_head *r;
 
 	iptc_fn = TC_READ_COUNTER;
 	CHECK(*handle);
 
-	if (!(c = find_label(chain, *handle))) {
+	if (!(c = iptcc_find_label(chain, *handle))) {
 		errno = ENOENT;
 		return NULL;
 	}
 
-	chainindex = offset2index(*handle, c->start_off);
-	end = offset2index(*handle, c->end_off);
-
-	if (chainindex + rulenum > end) {
+	if (!(r = iptcc_get_rule_num(c, rulenum))) {
 		errno = E2BIG;
 		return NULL;
 	}
 
-	e = index2entry(*handle, chainindex + rulenum);
-
-	return &e->counters;
+	return &r->entry[0].counters;
 }
 
 int
@@ -1373,33 +1633,24 @@
 		unsigned int rulenum,
 		TC_HANDLE_T *handle)
 {
-	STRUCT_ENTRY *e;
-	struct chain_cache *c;
-	unsigned int chainindex, end;
+	struct chain_head *c;
+	struct rule_head *r;
 	
 	iptc_fn = TC_ZERO_COUNTER;
 	CHECK(*handle);
 
-	if (!(c = find_label(chain, *handle))) {
+	if (!(c = iptcc_find_label(chain, *handle))) {
 		errno = ENOENT;
 		return 0;
 	}
 
-	chainindex = offset2index(*handle, c->start_off);
-	end = offset2index(*handle, c->end_off);
-
-	if (chainindex + rulenum > end) {
+	if (!(r = iptcc_get_rule_num(c, rulenum))) {
 		errno = E2BIG;
 		return 0;
 	}
 
-	e = index2entry(*handle, chainindex + rulenum);
-
-	if ((*handle)->counter_map[chainindex + rulenum].maptype
-			== COUNTER_MAP_NORMAL_MAP) {
-		(*handle)->counter_map[chainindex + rulenum].maptype
-			 = COUNTER_MAP_ZEROED;
-	}
+	if (r->counter_map.maptype == COUNTER_MAP_NORMAL_MAP)
+		r->counter_map.maptype = COUNTER_MAP_ZEROED;
 
 	set_changed(*handle);
 
@@ -1412,30 +1663,25 @@
 	       STRUCT_COUNTERS *counters,
 	       TC_HANDLE_T *handle)
 {
+	struct chain_head *c;
+	struct rule_head *r;
 	STRUCT_ENTRY *e;
-	struct chain_cache *c;
-	unsigned int chainindex, end;
 
 	iptc_fn = TC_SET_COUNTER;
 	CHECK(*handle);
 
-	if (!(c = find_label(chain, *handle))) {
+	if (!(c = iptcc_find_label(chain, *handle))) {
 		errno = ENOENT;
 		return 0;
 	}
 
-	chainindex = offset2index(*handle, c->start_off);
-	end = offset2index(*handle, c->end_off);
-
-	if (chainindex + rulenum > end) {
+	if (!(r = iptcc_get_rule_num(c, rulenum))) {
 		errno = E2BIG;
 		return 0;
 	}
 
-	e = index2entry(*handle, chainindex + rulenum);
-
-	(*handle)->counter_map[chainindex + rulenum].maptype
-		= COUNTER_MAP_SET;
+	e = r->entry;
+	r->counter_map.maptype = COUNTER_MAP_SET;
 
 	memcpy(&e->counters, counters, sizeof(STRUCT_COUNTERS));
 
@@ -1450,82 +1696,42 @@
 int
 TC_CREATE_CHAIN(const IPT_CHAINLABEL chain, TC_HANDLE_T *handle)
 {
-	int ret;
-	struct {
-		STRUCT_ENTRY head;
-		struct ipt_error_target name;
-		STRUCT_ENTRY ret;
-		STRUCT_STANDARD_TARGET target;
-	} newc;
-	unsigned int destination;
+	struct chain_head *c;
 
 	iptc_fn = TC_CREATE_CHAIN;
 
 	/* find_label doesn't cover built-in targets: DROP, ACCEPT,
            QUEUE, RETURN. */
-	if (find_label(chain, *handle)
+	if (iptcc_find_label(chain, *handle)
 	    || strcmp(chain, LABEL_DROP) == 0
 	    || strcmp(chain, LABEL_ACCEPT) == 0
 	    || strcmp(chain, LABEL_QUEUE) == 0
 	    || strcmp(chain, LABEL_RETURN) == 0) {
+		DEBUGP("Chain `%s' already exists\n", chain);
 		errno = EEXIST;
 		return 0;
 	}
 
 	if (strlen(chain)+1 > sizeof(IPT_CHAINLABEL)) {
+		DEBUGP("Chain name `%s' too long\n", chain);
 		errno = EINVAL;
 		return 0;
 	}
 
-	memset(&newc, 0, sizeof(newc));
-	newc.head.target_offset = sizeof(STRUCT_ENTRY);
-	newc.head.next_offset
-		= sizeof(STRUCT_ENTRY)
-		+ ALIGN(sizeof(struct ipt_error_target));
-	strcpy(newc.name.t.u.user.name, ERROR_TARGET);
-	newc.name.t.u.target_size = ALIGN(sizeof(struct ipt_error_target));
-	strcpy(newc.name.error, chain);
+	c = iptcc_alloc_chain_head(chain, 0);
+	if (!c) {
+		DEBUGP("Cannot allocate memory for chain `%s'\n", chain);
+		errno = ENOMEM;
+		return 0;
 
-	newc.ret.target_offset = sizeof(STRUCT_ENTRY);
-	newc.ret.next_offset
-		= sizeof(STRUCT_ENTRY)
-		+ ALIGN(sizeof(STRUCT_STANDARD_TARGET));
-	strcpy(newc.target.target.u.user.name, STANDARD_TARGET);
-	newc.target.target.u.target_size
-		= ALIGN(sizeof(STRUCT_STANDARD_TARGET));
-	newc.target.verdict = RETURN;
+	}
 
-	destination = index2offset(*handle, (*handle)->new_number -1);
-
-	/* Add just before terminal entry */
-	ret = insert_rules(2, sizeof(newc), &newc.head,
-			   destination,
-			   (*handle)->new_number - 1,
-			   0, handle);
+	DEBUGP("Creating chain `%s'\n", chain);
+	list_add_tail(&c->list, &(*handle)->chains);
 
 	set_changed(*handle);
 
-	/* add chain cache info for this chain */
-	add_chain_cache(*handle, chain, 
-			destination+newc.head.next_offset, 
-			destination+newc.head.next_offset);
-
-	return ret;
-}
-
-static int
-count_ref(STRUCT_ENTRY *e, unsigned int offset, unsigned int *ref)
-{
-	STRUCT_STANDARD_TARGET *t;
-
-	if (strcmp(GET_TARGET(e)->u.user.name, STANDARD_TARGET) == 0) {
-		t = (STRUCT_STANDARD_TARGET *)GET_TARGET(e);
-
-		if (t->verdict == offset)
-			(*ref)++;
-	}
-
-	return 0;
+	return 1;
 }
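
Chain creation is now a pure in-memory operation: allocate a chain_head, append it to the handle's chain list, and let the kernel representation (the ERROR header carrying the chain name plus the RETURN footer, which the removed code built by hand) be generated later at commit time. The fragment below guesses at the minimum an allocator such as iptcc_alloc_chain_head() has to do, using only fields this section touches (name, rules, num_rules, references); the real helper in this patch takes a hook number as its second argument and keeps additional state, so treat this purely as illustration.

    #include <stdlib.h>
    #include <string.h>
    #include "linux_list.h"

    /* Simplified assumption of the chain node; the real struct chain_head
     * in this patch carries more state (verdict, counters, foot_index, ...). */
    struct toy_chain_head {
            struct list_head list;      /* linkage into the handle's chain list */
            char name[32];              /* chain label */
            unsigned int num_rules;
            unsigned int references;
            struct list_head rules;     /* per-chain rule list */
    };

    /* Hypothetical stand-in for an allocator like iptcc_alloc_chain_head(). */
    static struct toy_chain_head *toy_alloc_chain_head(const char *name)
    {
            struct toy_chain_head *c = calloc(1, sizeof(*c));

            if (!c)
                    return NULL;
            strncpy(c->name, name, sizeof(c->name) - 1);
            INIT_LIST_HEAD(&c->rules);  /* empty rule list, ready for appends */
            return c;
    }
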
 
 /* Get the number of references to this chain. */
@@ -1533,17 +1739,15 @@
 TC_GET_REFERENCES(unsigned int *ref, const IPT_CHAINLABEL chain,
 		  TC_HANDLE_T *handle)
 {
-	struct chain_cache *c;
+	struct chain_head *c;
 
-	if (!(c = find_label(chain, *handle))) {
+	if (!(c = iptcc_find_label(chain, *handle))) {
 		errno = ENOENT;
 		return 0;
 	}
 
-	*ref = 0;
-	ENTRY_ITERATE((*handle)->entries.entrytable,
-		      (*handle)->entries.size,
-		      count_ref, c->start_off, ref);
+	*ref = c->references;
+
 	return 1;
 }
 
@@ -1551,48 +1755,53 @@
 int
 TC_DELETE_CHAIN(const IPT_CHAINLABEL chain, TC_HANDLE_T *handle)
 {
-	unsigned int labelidx, labeloff;
 	unsigned int references;
-	struct chain_cache *c;
-	int ret;
-	STRUCT_ENTRY *start;
-
-	if (!TC_GET_REFERENCES(&references, chain, handle))
-		return 0;
+	struct chain_head *c;
 
 	iptc_fn = TC_DELETE_CHAIN;
 
-	if (TC_BUILTIN(chain, *handle)) {
-		errno = EINVAL;
-		return 0;
-	}
-
-	if (references > 0) {
-		errno = EMLINK;
-		return 0;
-	}
-
-	if (!(c = find_label(chain, *handle))) {
+	if (!(c = iptcc_find_label(chain, *handle))) {
+		DEBUGP("cannot find chain `%s'\n", chain);
 		errno = ENOENT;
 		return 0;
 	}
 
-	if (c->start_off != c->end_off) {
+	if (TC_BUILTIN(chain, *handle)) {
+		DEBUGP("cannot remove builtin chain `%s'\n", chain);
+		errno = EINVAL;
+		return 0;
+	}
+
+	if (!TC_GET_REFERENCES(&references, chain, handle)) {
+		DEBUGP("cannot get references on chain `%s'\n", chain);
+		return 0;
+	}
+
+	if (references > 0) {
+		DEBUGP("chain `%s' still has references\n", chain);
+		errno = EMLINK;
+		return 0;
+	}
+
+	if (c->num_rules) {
+		DEBUGP("chain `%s' is not empty\n", chain);
 		errno = ENOTEMPTY;
 		return 0;
 	}
 
-	/* Need label index: preceeds chain start */
-	labelidx = offset2index(*handle, c->start_off) - 1;
-	labeloff = index2offset(*handle, labelidx);
+	/* If we are about to delete the chain that is the current
+	 * iterator, move the chain iterator forward. */
+	if (c == (*handle)->chain_iterator_cur)
+		iptcc_chain_iterator_advance(*handle);
 
-	start = offset2entry(*handle, c->start_off);
+	list_del(&c->list);
+	free(c);
 
-	ret = delete_rules(2,
-			   get_entry(*handle, labeloff)->next_offset
-			   + start->next_offset,
-			   labeloff, labelidx, handle);
-	return ret;
+	DEBUGP("chain `%s' deleted\n", chain);
+
+	set_changed(*handle);
+
+	return 1;
 }
 
 /* Renames a chain. */
@@ -1600,15 +1809,12 @@
 		    const IPT_CHAINLABEL newname,
 		    TC_HANDLE_T *handle)
 {
-	unsigned int labeloff, labelidx;
-	struct chain_cache *c;
-	struct ipt_error_target *t;
-
+	struct chain_head *c;
 	iptc_fn = TC_RENAME_CHAIN;
 
 	/* find_label doesn't cover built-in targets: DROP, ACCEPT,
            QUEUE, RETURN. */
-	if (find_label(newname, *handle)
+	if (iptcc_find_label(newname, *handle)
 	    || strcmp(newname, LABEL_DROP) == 0
 	    || strcmp(newname, LABEL_ACCEPT) == 0
 	    || strcmp(newname, LABEL_QUEUE) == 0
@@ -1617,7 +1823,7 @@
 		return 0;
 	}
 
-	if (!(c = find_label(oldname, *handle))
+	if (!(c = iptcc_find_label(oldname, *handle))
 	    || TC_BUILTIN(oldname, *handle)) {
 		errno = ENOENT;
 		return 0;
@@ -1628,20 +1834,8 @@
 		return 0;
 	}
 
-	/* Need label index: preceeds chain start */
-	labelidx = offset2index(*handle, c->start_off) - 1;
-	labeloff = index2offset(*handle, labelidx);
-
-	t = (struct ipt_error_target *)
-		GET_TARGET(get_entry(*handle, labeloff));
-
-	memset(t->error, 0, sizeof(t->error));
-	strcpy(t->error, newname);
-
-	/* update chain cache */
-	memset(c->name, 0, sizeof(c->name));
-	strcpy(c->name, newname);
-
+	strncpy(c->name, newname, sizeof(IPT_CHAINLABEL));
+
 	set_changed(*handle);
 
 	return 1;
@@ -1654,51 +1848,37 @@
 	      STRUCT_COUNTERS *counters,
 	      TC_HANDLE_T *handle)
 {
-	unsigned int hook;
-	unsigned int policyoff, ctrindex;
-	STRUCT_ENTRY *e;
-	STRUCT_STANDARD_TARGET *t;
+	struct chain_head *c;
 
 	iptc_fn = TC_SET_POLICY;
-	/* Figure out which chain. */
-	hook = TC_BUILTIN(chain, *handle);
-	if (hook == 0) {
-		errno = ENOENT;
-		return 0;
-	} else
-		hook--;
 
-	policyoff = get_chain_end(*handle, (*handle)->info.hook_entry[hook]);
-	if (policyoff != (*handle)->info.underflow[hook]) {
-		printf("ERROR: Policy for `%s' offset %u != underflow %u\n",
-		       chain, policyoff, (*handle)->info.underflow[hook]);
+	if (!(c = iptcc_find_label(chain, *handle))) {
+		DEBUGP("cannot find chain `%s'\n", chain);
+		errno = ENOENT;
 		return 0;
 	}
 
-	e = get_entry(*handle, policyoff);
-	t = (STRUCT_STANDARD_TARGET *)GET_TARGET(e);
+	if (!iptcc_is_builtin(c)) {
+		DEBUGP("cannot set policy of userdefinedchain `%s'\n", chain);
+		errno = ENOENT;
+		return 0;
+	}
 
 	if (strcmp(policy, LABEL_ACCEPT) == 0)
-		t->verdict = -NF_ACCEPT - 1;
+		c->verdict = -NF_ACCEPT - 1;
 	else if (strcmp(policy, LABEL_DROP) == 0)
-		t->verdict = -NF_DROP - 1;
+		c->verdict = -NF_DROP - 1;
 	else {
 		errno = EINVAL;
 		return 0;
 	}
 
-	ctrindex = entry2index(*handle, e);
-
 	if (counters) {
 		/* set byte and packet counters */
-		memcpy(&e->counters, counters, sizeof(STRUCT_COUNTERS));
-
-		(*handle)->counter_map[ctrindex].maptype
-			= COUNTER_MAP_SET;
-
+		memcpy(&c->counters, counters, sizeof(STRUCT_COUNTERS));
+		c->counter_map.maptype = COUNTER_MAP_SET;
 	} else {
-		(*handle)->counter_map[ctrindex]
-			= ((struct counter_map){ COUNTER_MAP_NOMAP, 0 });
+		c->counter_map.maptype = COUNTER_MAP_NOMAP;
 	}
 
 	set_changed(*handle);
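
The `-NF_ACCEPT - 1` / `-NF_DROP - 1` values stored above are the standard-target verdict encoding: non-negative verdicts are jump offsets into the rule blob, so built-in verdicts are stored as -verdict - 1, which keeps even NF_DROP (numerically 0) strictly negative. A minimal, self-contained illustration; the constants are copied here only to make the snippet stand alone (the real values come from linux/netfilter.h), and toy_decode_verdict() is made up:

    #include <stdio.h>

    #define NF_DROP   0
    #define NF_ACCEPT 1

    static const char *toy_decode_verdict(int v)
    {
            if (v >= 0)
                    return "jump to offset";
            if (v == -NF_ACCEPT - 1)
                    return "ACCEPT";
            if (v == -NF_DROP - 1)
                    return "DROP";
            return "other built-in (RETURN/QUEUE/...)";
    }

    int main(void)
    {
            printf("%d -> %s\n", -NF_ACCEPT - 1, toy_decode_verdict(-NF_ACCEPT - 1)); /* -2 -> ACCEPT */
            printf("%d -> %s\n", -NF_DROP - 1,   toy_decode_verdict(-NF_DROP - 1));   /* -1 -> DROP */
            printf("%d -> %s\n", 112,            toy_decode_verdict(112));            /* jump offset */
            return 0;
    }
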
@@ -1721,20 +1901,76 @@
 	answer->bcnt = a->bcnt - b->bcnt;
 }
 
+
+static void counters_nomap(STRUCT_COUNTERS_INFO *newcounters,
+			   unsigned int index)
+{
+	newcounters->counters[index] = ((STRUCT_COUNTERS) { 0, 0});
+	DEBUGP_C("NOMAP => zero\n");
+}
+
+static void counters_normal_map(STRUCT_COUNTERS_INFO *newcounters,
+				STRUCT_REPLACE *repl,
+				unsigned int index,
+				unsigned int mappos)
+{
+	/* Original read: X.
+	 * Atomic read on replacement: X + Y.
+	 * Currently in kernel: Z.
+	 * Want in kernel: X + Y + Z.
+	 * => Add in X + Y
+	 * => Add in replacement read.
+	 */
+	newcounters->counters[index] = repl->counters[mappos];
+	DEBUGP_C("NORMAL_MAP => mappos %u \n", mappos);
+}
+
+static void counters_map_zeroed(STRUCT_COUNTERS_INFO *newcounters,
+				STRUCT_REPLACE *repl,
+				unsigned int index,
+				unsigned int mappos,
+				STRUCT_COUNTERS *counters)
+{
+	/* Original read: X.
+	 * Atomic read on replacement: X + Y.
+	 * Currently in kernel: Z.
+	 * Want in kernel: Y + Z.
+	 * => Add in Y.
+	 * => Add in (replacement read - original read).
+	 */
+	subtract_counters(&newcounters->counters[index],
+			  &repl->counters[mappos],
+			  counters);
+	DEBUGP_C("ZEROED => mappos %u\n", mappos);
+}
+
+static void counters_map_set(STRUCT_COUNTERS_INFO *newcounters,
+			     unsigned int index,
+			     STRUCT_COUNTERS *counters)
+{
+	/* Want to set counter (iptables-restore) */
+
+	memcpy(&newcounters->counters[index], counters,
+		sizeof(STRUCT_COUNTERS));
+
+	DEBUGP_C("SET\n");
+}
+
+
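
A small worked example of the remapping rules spelled out in the comments above: if the library read X = 100 packets when it fetched the ruleset, and the kernel reports X + Y = 130 packets atomically at replace time, then NORMAL_MAP feeds the full 130 back (so the kernel ends up with X + Y + Z), while ZEROED feeds back only the difference 130 - 100 = 30, i.e. the traffic seen since the user zeroed the rule. The snippet uses plain toy structs, not the library's STRUCT_COUNTERS:

    #include <stdio.h>

    struct toy_counters { unsigned long long pcnt, bcnt; };

    static struct toy_counters toy_sub(struct toy_counters a, struct toy_counters b)
    {
            struct toy_counters r = { a.pcnt - b.pcnt, a.bcnt - b.bcnt };
            return r;
    }

    int main(void)
    {
            /* X: what the library read when the ruleset was fetched */
            struct toy_counters original   = { 100, 64000 };
            /* X + Y: what the kernel reports atomically at replace time */
            struct toy_counters at_replace = { 130, 80000 };

            /* NORMAL_MAP: add back X + Y, so the kernel ends up with X + Y + Z */
            struct toy_counters normal = at_replace;

            /* ZEROED: the rule was zeroed, so only add back Y = (X + Y) - X */
            struct toy_counters zeroed = toy_sub(at_replace, original);

            printf("NORMAL_MAP adds back %llu packets\n", normal.pcnt);  /* 130 */
            printf("ZEROED adds back %llu packets\n", zeroed.pcnt);      /* 30  */
            return 0;
    }
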
 int
 TC_COMMIT(TC_HANDLE_T *handle)
 {
 	/* Replace, then map back the counters. */
 	STRUCT_REPLACE *repl;
 	STRUCT_COUNTERS_INFO *newcounters;
-	unsigned int i;
+	struct chain_head *c;
+	int ret;
 	size_t counterlen;
+	int new_number;
+	unsigned int new_size;
 
 	CHECK(*handle);
 
-	counterlen = sizeof(STRUCT_COUNTERS_INFO)
-			+ sizeof(STRUCT_COUNTERS) * (*handle)->new_number;
-
 #if 0
 	TC_DUMP_ENTRIES(*handle);
 #endif
@@ -1743,11 +1979,21 @@
 	if (!(*handle)->changed)
 		goto finished;
 
-	repl = malloc(sizeof(*repl) + (*handle)->entries.size);
+	new_number = iptcc_compile_table_prep(*handle, &new_size);
+	if (new_number < 0) {
+		errno = ENOMEM;
+		return 0;
+	}
+
+	repl = malloc(sizeof(*repl) + new_size);
 	if (!repl) {
 		errno = ENOMEM;
 		return 0;
 	}
+	memset(repl, 0, sizeof(*repl));
+
+	counterlen = sizeof(STRUCT_COUNTERS_INFO)
+			+ sizeof(STRUCT_COUNTERS) * new_number;
 
 	/* These are the old counters we will get from kernel */
 	repl->counters = malloc(sizeof(STRUCT_COUNTERS)
@@ -1757,7 +2003,6 @@
 		errno = ENOMEM;
 		return 0;
 	}
-
 	/* These are the counters we're going to put back, later. */
 	newcounters = malloc(counterlen);
 	if (!newcounters) {
@@ -1766,21 +2011,40 @@
 		errno = ENOMEM;
 		return 0;
 	}
+	memset(newcounters, 0, counterlen);
 
 	strcpy(repl->name, (*handle)->info.name);
-	repl->num_entries = (*handle)->new_number;
-	repl->size = (*handle)->entries.size;
-	memcpy(repl->hook_entry, (*handle)->info.hook_entry,
-	       sizeof(repl->hook_entry));
-	memcpy(repl->underflow, (*handle)->info.underflow,
-	       sizeof(repl->underflow));
+	repl->num_entries = new_number;
+	repl->size = new_size;
+
 	repl->num_counters = (*handle)->info.num_entries;
 	repl->valid_hooks = (*handle)->info.valid_hooks;
-	memcpy(repl->entries, (*handle)->entries.entrytable,
-	       (*handle)->entries.size);
+
+	DEBUGP("num_entries=%u, size=%u, num_counters=%u\n",
+		repl->num_entries, repl->size, repl->num_counters);
+
+	ret = iptcc_compile_table(*handle, repl);
+	if (ret < 0) {
+		errno = ret;
+		free(repl->counters);
+		free(repl);
+		return 0;
+	}
+
+
+#ifdef IPTC_DEBUG2
+	{
+		int fd = open("/tmp/libiptc-so_set_replace.blob", 
+				O_CREAT|O_WRONLY);
+		if (fd >= 0) {
+			write(fd, repl, sizeof(*repl) + repl->size);
+			close(fd);
+		}
+	}
+#endif
 
 	if (setsockopt(sockfd, TC_IPPROTO, SO_SET_REPLACE, repl,
-		       sizeof(*repl) + (*handle)->entries.size) < 0) {
+		       sizeof(*repl) + repl->size) < 0) {
 		free(repl->counters);
 		free(repl);
 		free(newcounters);
@@ -1789,50 +2053,65 @@
 
 	/* Put counters back. */
 	strcpy(newcounters->name, (*handle)->info.name);
-	newcounters->num_counters = (*handle)->new_number;
-	for (i = 0; i < (*handle)->new_number; i++) {
-		unsigned int mappos = (*handle)->counter_map[i].mappos;
-		switch ((*handle)->counter_map[i].maptype) {
-		case COUNTER_MAP_NOMAP:
-			newcounters->counters[i]
-				= ((STRUCT_COUNTERS){ 0, 0 });
-			break;
+	newcounters->num_counters = new_number;
 
-		case COUNTER_MAP_NORMAL_MAP:
-			/* Original read: X.
-			 * Atomic read on replacement: X + Y.
-			 * Currently in kernel: Z.
-			 * Want in kernel: X + Y + Z.
-			 * => Add in X + Y
-			 * => Add in replacement read.
-			 */
-			newcounters->counters[i] = repl->counters[mappos];
-			break;
+	list_for_each_entry(c, &(*handle)->chains, list) {
+		struct rule_head *r;
 
-		case COUNTER_MAP_ZEROED:
-			/* Original read: X.
-			 * Atomic read on replacement: X + Y.
-			 * Currently in kernel: Z.
-			 * Want in kernel: Y + Z.
-			 * => Add in Y.
-			 * => Add in (replacement read - original read).
-			 */
-			subtract_counters(&newcounters->counters[i],
-					  &repl->counters[mappos],
-					  &index2entry(*handle, i)->counters);
-			break;
+		/* Builtin chains have their own counters */
+		if (iptcc_is_builtin(c)) {
+			DEBUGP("counter for chain-index %u: ", c->foot_index);
+			switch(c->counter_map.maptype) {
+			case COUNTER_MAP_NOMAP:
+				counters_nomap(newcounters, c->foot_index);
+				break;
+			case COUNTER_MAP_NORMAL_MAP:
+				counters_normal_map(newcounters, repl,
+						    c->foot_index, 
+						    c->counter_map.mappos);
+				break;
+			case COUNTER_MAP_ZEROED:
+				counters_map_zeroed(newcounters, repl,
+						    c->foot_index, 
+						    c->counter_map.mappos,
+						    &c->counters);
+				break;
+			case COUNTER_MAP_SET:
+				counters_map_set(newcounters, c->foot_index,
+						 &c->counters);
+				break;
+			}
+		}
 
-		case COUNTER_MAP_SET:
-			/* Want to set counter (iptables-restore) */
+		list_for_each_entry(r, &c->rules, list) {
+			DEBUGP("counter for index %u: ", r->index);
+			switch (r->counter_map.maptype) {
+			case COUNTER_MAP_NOMAP:
+				counters_nomap(newcounters, r->index);
+				break;
 
-			memcpy(&newcounters->counters[i],
-			       &index2entry(*handle, i)->counters,
-			       sizeof(STRUCT_COUNTERS));
+			case COUNTER_MAP_NORMAL_MAP:
+				counters_normal_map(newcounters, repl,
+						    r->index, 
+						    r->counter_map.mappos);
+				break;
 
-			break;
+			case COUNTER_MAP_ZEROED:
+				counters_map_zeroed(newcounters, repl,
+						    r->index,
+						    r->counter_map.mappos,
+						    &r->entry->counters);
+				break;
+
+			case COUNTER_MAP_SET:
+				counters_map_set(newcounters, r->index,
+						 &r->entry->counters);
+				break;
+			}
 		}
 	}
 
+
 #ifdef KERNEL_64_USERSPACE_32
 	{
 		/* Kernel will think that pointer should be 64-bits, and get
@@ -1848,6 +2127,17 @@
 	}
 #endif /* KERNEL_64_USERSPACE_32 */
 
+#ifdef IPTC_DEBUG2
+	{
+		int fd = open("/tmp/libiptc-so_set_add_counters.blob", 
+				O_CREAT|O_WRONLY);
+		if (fd >= 0) {
+			write(fd, newcounters, counterlen);
+			close(fd);
+		}
+	}
+#endif
+
 	if (setsockopt(sockfd, TC_IPPROTO, SO_SET_ADD_COUNTERS,
 		       newcounters, counterlen) < 0) {
 		free(repl->counters);
diff --git a/libiptc/linux_list.h b/libiptc/linux_list.h
new file mode 100644
index 0000000..abdcf88
--- /dev/null
+++ b/libiptc/linux_list.h
@@ -0,0 +1,723 @@
+#ifndef _LINUX_LIST_H
+#define _LINUX_LIST_H
+
+#undef offsetof
+#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
+
+/**
+ * container_of - cast a member of a structure out to the containing structure
+ *
+ * @ptr:	the pointer to the member.
+ * @type:	the type of the container struct this is embedded in.
+ * @member:	the name of the member within the struct.
+ *
+ */
+#define container_of(ptr, type, member) ({			\
+        const typeof( ((type *)0)->member ) *__mptr = (ptr);	\
+        (type *)( (char *)__mptr - offsetof(type,member) );})
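
container_of() is the workhorse behind list_entry(): given a pointer to an embedded member, it subtracts that member's offset to recover the enclosing structure. A tiny usage example (GCC extensions required, as the header itself assumes; the struct and names are illustrative):

    #include <stdio.h>
    #include <stddef.h>
    #include "linux_list.h"

    struct toy {
            int id;
            double payload;
    };

    int main(void)
    {
            struct toy t = { 42, 3.14 };
            double *p = &t.payload;     /* only the member pointer is known */
            struct toy *back = container_of(p, struct toy, payload);

            printf("recovered id = %d\n", back->id);    /* prints 42 */
            return 0;
    }
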
+
+/*
+ * Check at compile time that something is of a particular type.
+ * Always evaluates to 1 so you may use it easily in comparisons.
+ */
+#define typecheck(type,x) \
+({	type __dummy; \
+	typeof(x) __dummy2; \
+	(void)(&__dummy == &__dummy2); \
+	1; \
+})
+
+#define prefetch(x)		1
+
+/* empty define to make this work in userspace -HW */
+#define smp_wmb()
+
+/*
+ * These are non-NULL pointers that will result in page faults
+ * under normal circumstances, used to verify that nobody uses
+ * non-initialized list entries.
+ */
+#define LIST_POISON1  ((void *) 0x00100100)
+#define LIST_POISON2  ((void *) 0x00200200)
+
+/*
+ * Simple doubly linked list implementation.
+ *
+ * Some of the internal functions ("__xxx") are useful when
+ * manipulating whole lists rather than single entries, as
+ * sometimes we already know the next/prev entries and we can
+ * generate better code by using them directly rather than
+ * using the generic single-entry routines.
+ */
+
+struct list_head {
+	struct list_head *next, *prev;
+};
+
+#define LIST_HEAD_INIT(name) { &(name), &(name) }
+
+#define LIST_HEAD(name) \
+	struct list_head name = LIST_HEAD_INIT(name)
+
+#define INIT_LIST_HEAD(ptr) do { \
+	(ptr)->next = (ptr); (ptr)->prev = (ptr); \
+} while (0)
+
+/*
+ * Insert a new entry between two known consecutive entries.
+ *
+ * This is only for internal list manipulation where we know
+ * the prev/next entries already!
+ */
+static inline void __list_add(struct list_head *new,
+			      struct list_head *prev,
+			      struct list_head *next)
+{
+	next->prev = new;
+	new->next = next;
+	new->prev = prev;
+	prev->next = new;
+}
+
+/**
+ * list_add - add a new entry
+ * @new: new entry to be added
+ * @head: list head to add it after
+ *
+ * Insert a new entry after the specified head.
+ * This is good for implementing stacks.
+ */
+static inline void list_add(struct list_head *new, struct list_head *head)
+{
+	__list_add(new, head, head->next);
+}
+
+/**
+ * list_add_tail - add a new entry
+ * @new: new entry to be added
+ * @head: list head to add it before
+ *
+ * Insert a new entry before the specified head.
+ * This is useful for implementing queues.
+ */
+static inline void list_add_tail(struct list_head *new, struct list_head *head)
+{
+	__list_add(new, head->prev, head);
+}
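
A short usage example for the two insertion helpers: the list_head is embedded in the user's own structure, list_add() pushes at the front (stack order) and list_add_tail() appends at the back (queue order). It uses list_for_each()/list_entry(), which are defined further down in this header; the struct and names are illustrative.

    #include <stdio.h>
    #include "linux_list.h"

    struct item {
            int value;
            struct list_head list;      /* embedded linkage */
    };

    int main(void)
    {
            LIST_HEAD(queue);           /* declares and self-initialises the head */
            struct item a = { 1 }, b = { 2 }, c = { 3 };
            struct list_head *pos;

            list_add_tail(&a.list, &queue);  /* queue: 1 */
            list_add_tail(&b.list, &queue);  /* queue: 1 2 */
            list_add(&c.list, &queue);       /* queue: 3 1 2 (pushed at the head) */

            list_for_each(pos, &queue)
                    printf("%d ", list_entry(pos, struct item, list)->value);
            printf("\n");                    /* prints: 3 1 2 */
            return 0;
    }
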
+
+/*
+ * Insert a new entry between two known consecutive entries.
+ *
+ * This is only for internal list manipulation where we know
+ * the prev/next entries already!
+ */
+static inline void __list_add_rcu(struct list_head * new,
+		struct list_head * prev, struct list_head * next)
+{
+	new->next = next;
+	new->prev = prev;
+	smp_wmb();
+	next->prev = new;
+	prev->next = new;
+}
+
+/**
+ * list_add_rcu - add a new entry to rcu-protected list
+ * @new: new entry to be added
+ * @head: list head to add it after
+ *
+ * Insert a new entry after the specified head.
+ * This is good for implementing stacks.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as list_add_rcu()
+ * or list_del_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * list_for_each_entry_rcu().
+ */
+static inline void list_add_rcu(struct list_head *new, struct list_head *head)
+{
+	__list_add_rcu(new, head, head->next);
+}
+
+/**
+ * list_add_tail_rcu - add a new entry to rcu-protected list
+ * @new: new entry to be added
+ * @head: list head to add it before
+ *
+ * Insert a new entry before the specified head.
+ * This is useful for implementing queues.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as list_add_tail_rcu()
+ * or list_del_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * list_for_each_entry_rcu().
+ */
+static inline void list_add_tail_rcu(struct list_head *new,
+					struct list_head *head)
+{
+	__list_add_rcu(new, head->prev, head);
+}
+
+/*
+ * Delete a list entry by making the prev/next entries
+ * point to each other.
+ *
+ * This is only for internal list manipulation where we know
+ * the prev/next entries already!
+ */
+static inline void __list_del(struct list_head * prev, struct list_head * next)
+{
+	next->prev = prev;
+	prev->next = next;
+}
+
+/**
+ * list_del - deletes entry from list.
+ * @entry: the element to delete from the list.
+ * Note: list_empty on entry does not return true after this, the entry is
+ * in an undefined state.
+ */
+static inline void list_del(struct list_head *entry)
+{
+	__list_del(entry->prev, entry->next);
+	entry->next = LIST_POISON1;
+	entry->prev = LIST_POISON2;
+}
+
+/**
+ * list_del_rcu - deletes entry from list without re-initialization
+ * @entry: the element to delete from the list.
+ *
+ * Note: list_empty on entry does not return true after this,
+ * the entry is in an undefined state. It is useful for RCU based
+ * lockfree traversal.
+ *
+ * In particular, it means that we can not poison the forward
+ * pointers that may still be used for walking the list.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as list_del_rcu()
+ * or list_add_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * list_for_each_entry_rcu().
+ *
+ * Note that the caller is not permitted to immediately free
+ * the newly deleted entry.  Instead, either synchronize_kernel()
+ * or call_rcu() must be used to defer freeing until an RCU
+ * grace period has elapsed.
+ */
+static inline void list_del_rcu(struct list_head *entry)
+{
+	__list_del(entry->prev, entry->next);
+	entry->prev = LIST_POISON2;
+}
+
+/**
+ * list_del_init - deletes entry from list and reinitialize it.
+ * @entry: the element to delete from the list.
+ */
+static inline void list_del_init(struct list_head *entry)
+{
+	__list_del(entry->prev, entry->next);
+	INIT_LIST_HEAD(entry);
+}
+
+/**
+ * list_move - delete from one list and add as another's head
+ * @list: the entry to move
+ * @head: the head that will precede our entry
+ */
+static inline void list_move(struct list_head *list, struct list_head *head)
+{
+        __list_del(list->prev, list->next);
+        list_add(list, head);
+}
+
+/**
+ * list_move_tail - delete from one list and add as another's tail
+ * @list: the entry to move
+ * @head: the head that will follow our entry
+ */
+static inline void list_move_tail(struct list_head *list,
+				  struct list_head *head)
+{
+        __list_del(list->prev, list->next);
+        list_add_tail(list, head);
+}
+
+/**
+ * list_empty - tests whether a list is empty
+ * @head: the list to test.
+ */
+static inline int list_empty(const struct list_head *head)
+{
+	return head->next == head;
+}
+
+/**
+ * list_empty_careful - tests whether a list is
+ * empty _and_ checks that no other CPU might be
+ * in the process of still modifying either member
+ *
+ * NOTE: using list_empty_careful() without synchronization
+ * can only be safe if the only activity that can happen
+ * to the list entry is list_del_init(). Eg. it cannot be used
+ * if another CPU could re-list_add() it.
+ *
+ * @head: the list to test.
+ */
+static inline int list_empty_careful(const struct list_head *head)
+{
+	struct list_head *next = head->next;
+	return (next == head) && (next == head->prev);
+}
+
+static inline void __list_splice(struct list_head *list,
+				 struct list_head *head)
+{
+	struct list_head *first = list->next;
+	struct list_head *last = list->prev;
+	struct list_head *at = head->next;
+
+	first->prev = head;
+	head->next = first;
+
+	last->next = at;
+	at->prev = last;
+}
+
+/**
+ * list_splice - join two lists
+ * @list: the new list to add.
+ * @head: the place to add it in the first list.
+ */
+static inline void list_splice(struct list_head *list, struct list_head *head)
+{
+	if (!list_empty(list))
+		__list_splice(list, head);
+}
+
+/**
+ * list_splice_init - join two lists and reinitialise the emptied list.
+ * @list: the new list to add.
+ * @head: the place to add it in the first list.
+ *
+ * The list at @list is reinitialised
+ */
+static inline void list_splice_init(struct list_head *list,
+				    struct list_head *head)
+{
+	if (!list_empty(list)) {
+		__list_splice(list, head);
+		INIT_LIST_HEAD(list);
+	}
+}
+
+/**
+ * list_entry - get the struct for this entry
+ * @ptr:	the &struct list_head pointer.
+ * @type:	the type of the struct this is embedded in.
+ * @member:	the name of the list_struct within the struct.
+ */
+#define list_entry(ptr, type, member) \
+	container_of(ptr, type, member)
+
+/**
+ * list_for_each	-	iterate over a list
+ * @pos:	the &struct list_head to use as a loop counter.
+ * @head:	the head for your list.
+ */
+#define list_for_each(pos, head) \
+	for (pos = (head)->next, prefetch(pos->next); pos != (head); \
+        	pos = pos->next, prefetch(pos->next))
+
+/**
+ * __list_for_each	-	iterate over a list
+ * @pos:	the &struct list_head to use as a loop counter.
+ * @head:	the head for your list.
+ *
+ * This variant differs from list_for_each() in that it's the
+ * simplest possible list iteration code, no prefetching is done.
+ * Use this for code that knows the list to be very short (empty
+ * or 1 entry) most of the time.
+ */
+#define __list_for_each(pos, head) \
+	for (pos = (head)->next; pos != (head); pos = pos->next)
+
+/**
+ * list_for_each_prev	-	iterate over a list backwards
+ * @pos:	the &struct list_head to use as a loop counter.
+ * @head:	the head for your list.
+ */
+#define list_for_each_prev(pos, head) \
+	for (pos = (head)->prev, prefetch(pos->prev); pos != (head); \
+        	pos = pos->prev, prefetch(pos->prev))
+
+/**
+ * list_for_each_safe	-	iterate over a list safe against removal of list entry
+ * @pos:	the &struct list_head to use as a loop counter.
+ * @n:		another &struct list_head to use as temporary storage
+ * @head:	the head for your list.
+ */
+#define list_for_each_safe(pos, n, head) \
+	for (pos = (head)->next, n = pos->next; pos != (head); \
+		pos = n, n = pos->next)
+
+/**
+ * list_for_each_entry	-	iterate over list of given type
+ * @pos:	the type * to use as a loop counter.
+ * @head:	the head for your list.
+ * @member:	the name of the list_struct within the struct.
+ */
+#define list_for_each_entry(pos, head, member)				\
+	for (pos = list_entry((head)->next, typeof(*pos), member),	\
+		     prefetch(pos->member.next);			\
+	     &pos->member != (head); 					\
+	     pos = list_entry(pos->member.next, typeof(*pos), member),	\
+		     prefetch(pos->member.next))
+
+/**
+ * list_for_each_entry_reverse - iterate backwards over list of given type.
+ * @pos:	the type * to use as a loop counter.
+ * @head:	the head for your list.
+ * @member:	the name of the list_struct within the struct.
+ */
+#define list_for_each_entry_reverse(pos, head, member)			\
+	for (pos = list_entry((head)->prev, typeof(*pos), member),	\
+		     prefetch(pos->member.prev);			\
+	     &pos->member != (head); 					\
+	     pos = list_entry(pos->member.prev, typeof(*pos), member),	\
+		     prefetch(pos->member.prev))
+
+/**
+ * list_prepare_entry - prepare a pos entry for use as a start point in
+ *			list_for_each_entry_continue
+ * @pos:	the type * to use as a start point
+ * @head:	the head of the list
+ * @member:	the name of the list_struct within the struct.
+ */
+#define list_prepare_entry(pos, head, member) \
+	((pos) ? : list_entry(head, typeof(*pos), member))
+
+/**
+ * list_for_each_entry_continue -	iterate over list of given type
+ *			continuing after existing point
+ * @pos:	the type * to use as a loop counter.
+ * @head:	the head for your list.
+ * @member:	the name of the list_struct within the struct.
+ */
+#define list_for_each_entry_continue(pos, head, member) 		\
+	for (pos = list_entry(pos->member.next, typeof(*pos), member),	\
+		     prefetch(pos->member.next);			\
+	     &pos->member != (head);					\
+	     pos = list_entry(pos->member.next, typeof(*pos), member),	\
+		     prefetch(pos->member.next))
+
+/**
+ * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
+ * @pos:	the type * to use as a loop counter.
+ * @n:		another type * to use as temporary storage
+ * @head:	the head for your list.
+ * @member:	the name of the list_struct within the struct.
+ */
+#define list_for_each_entry_safe(pos, n, head, member)			\
+	for (pos = list_entry((head)->next, typeof(*pos), member),	\
+		n = list_entry(pos->member.next, typeof(*pos), member);	\
+	     &pos->member != (head); 					\
+	     pos = n, n = list_entry(n->member.next, typeof(*n), member))
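
Usage sketch for the typed iterators: list_for_each_entry() walks the embedding structures directly, and the _safe variant keeps a lookahead cursor so the current node can be unlinked and freed mid-walk, which is exactly the pattern TC_FLUSH_ENTRIES() uses earlier in this patch. The struct and names are illustrative.

    #include <stdio.h>
    #include <stdlib.h>
    #include "linux_list.h"

    struct item {
            int value;
            struct list_head list;
    };

    static void push(struct list_head *head, int v)
    {
            struct item *it = malloc(sizeof(*it));

            if (!it)
                    return;
            it->value = v;
            list_add_tail(&it->list, head);
    }

    int main(void)
    {
            LIST_HEAD(items);
            struct item *it, *tmp;

            push(&items, 10);
            push(&items, 20);
            push(&items, 30);

            list_for_each_entry(it, &items, list)       /* read-only walk */
                    printf("%d ", it->value);           /* 10 20 30 */
            printf("\n");

            list_for_each_entry_safe(it, tmp, &items, list) {
                    list_del(&it->list);                /* unlink ...          */
                    free(it);                           /* ... then free, safely */
            }
            printf("empty: %d\n", list_empty(&items));  /* 1 */
            return 0;
    }
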
+
+/**
+ * list_for_each_rcu	-	iterate over an rcu-protected list
+ * @pos:	the &struct list_head to use as a loop counter.
+ * @head:	the head for your list.
+ *
+ * This list-traversal primitive may safely run concurrently with
+ * the _rcu list-mutation primitives such as list_add_rcu()
+ * as long as the traversal is guarded by rcu_read_lock().
+ */
+#define list_for_each_rcu(pos, head) \
+	for (pos = (head)->next, prefetch(pos->next); pos != (head); \
+        	pos = pos->next, ({ smp_read_barrier_depends(); 0;}), prefetch(pos->next))
+
+#define __list_for_each_rcu(pos, head) \
+	for (pos = (head)->next; pos != (head); \
+        	pos = pos->next, ({ smp_read_barrier_depends(); 0;}))
+
+/**
+ * list_for_each_safe_rcu	-	iterate over an rcu-protected list safe
+ *					against removal of list entry
+ * @pos:	the &struct list_head to use as a loop counter.
+ * @n:		another &struct list_head to use as temporary storage
+ * @head:	the head for your list.
+ *
+ * This list-traversal primitive may safely run concurrently with
+ * the _rcu list-mutation primitives such as list_add_rcu()
+ * as long as the traversal is guarded by rcu_read_lock().
+ */
+#define list_for_each_safe_rcu(pos, n, head) \
+	for (pos = (head)->next, n = pos->next; pos != (head); \
+		pos = n, ({ smp_read_barrier_depends(); 0;}), n = pos->next)
+
+/**
+ * list_for_each_entry_rcu	-	iterate over rcu list of given type
+ * @pos:	the type * to use as a loop counter.
+ * @head:	the head for your list.
+ * @member:	the name of the list_struct within the struct.
+ *
+ * This list-traversal primitive may safely run concurrently with
+ * the _rcu list-mutation primitives such as list_add_rcu()
+ * as long as the traversal is guarded by rcu_read_lock().
+ */
+#define list_for_each_entry_rcu(pos, head, member)			\
+	for (pos = list_entry((head)->next, typeof(*pos), member),	\
+		     prefetch(pos->member.next);			\
+	     &pos->member != (head); 					\
+	     pos = list_entry(pos->member.next, typeof(*pos), member),	\
+		     ({ smp_read_barrier_depends(); 0;}),		\
+		     prefetch(pos->member.next))
+
+
+/**
+ * list_for_each_continue_rcu	-	iterate over an rcu-protected list
+ *			continuing after existing point.
+ * @pos:	the &struct list_head to use as a loop counter.
+ * @head:	the head for your list.
+ *
+ * This list-traversal primitive may safely run concurrently with
+ * the _rcu list-mutation primitives such as list_add_rcu()
+ * as long as the traversal is guarded by rcu_read_lock().
+ */
+#define list_for_each_continue_rcu(pos, head) \
+	for ((pos) = (pos)->next, prefetch((pos)->next); (pos) != (head); \
+        	(pos) = (pos)->next, ({ smp_read_barrier_depends(); 0;}), prefetch((pos)->next))
+
+/*
+ * Double linked lists with a single pointer list head.
+ * Mostly useful for hash tables where the two pointer list head is
+ * too wasteful.
+ * You lose the ability to access the tail in O(1).
+ */
+
+struct hlist_head {
+	struct hlist_node *first;
+};
+
+struct hlist_node {
+	struct hlist_node *next, **pprev;
+};
+
+#define HLIST_HEAD_INIT { .first = NULL }
+#define HLIST_HEAD(name) struct hlist_head name = {  .first = NULL }
+#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
+#define INIT_HLIST_NODE(ptr) ((ptr)->next = NULL, (ptr)->pprev = NULL)
+
+static inline int hlist_unhashed(const struct hlist_node *h)
+{
+	return !h->pprev;
+}
+
+static inline int hlist_empty(const struct hlist_head *h)
+{
+	return !h->first;
+}
+
+static inline void __hlist_del(struct hlist_node *n)
+{
+	struct hlist_node *next = n->next;
+	struct hlist_node **pprev = n->pprev;
+	*pprev = next;
+	if (next)
+		next->pprev = pprev;
+}
+
+static inline void hlist_del(struct hlist_node *n)
+{
+	__hlist_del(n);
+	n->next = LIST_POISON1;
+	n->pprev = LIST_POISON2;
+}
+
+/**
+ * hlist_del_rcu - deletes entry from hash list without re-initialization
+ * @n: the element to delete from the hash list.
+ *
+ * Note: list_unhashed() on entry does not return true after this,
+ * the entry is in an undefined state. It is useful for RCU based
+ * lockfree traversal.
+ *
+ * In particular, it means that we can not poison the forward
+ * pointers that may still be used for walking the hash list.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as hlist_add_head_rcu()
+ * or hlist_del_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * hlist_for_each_entry().
+ */
+static inline void hlist_del_rcu(struct hlist_node *n)
+{
+	__hlist_del(n);
+	n->pprev = LIST_POISON2;
+}
+
+static inline void hlist_del_init(struct hlist_node *n)
+{
+	if (n->pprev)  {
+		__hlist_del(n);
+		INIT_HLIST_NODE(n);
+	}
+}
+
+#define hlist_del_rcu_init hlist_del_init
+
+static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
+{
+	struct hlist_node *first = h->first;
+	n->next = first;
+	if (first)
+		first->pprev = &n->next;
+	h->first = n;
+	n->pprev = &h->first;
+}
+
+
+/**
+ * hlist_add_head_rcu - adds the specified element to the specified hlist,
+ * while permitting racing traversals.
+ * @n: the element to add to the hash list.
+ * @h: the list to add to.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as hlist_add_head_rcu()
+ * or hlist_del_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * hlist_for_each_entry(), but only if smp_read_barrier_depends()
+ * is used to prevent memory-consistency problems on Alpha CPUs.
+ * Regardless of the type of CPU, the list-traversal primitive
+ * must be guarded by rcu_read_lock().
+ *
+ * OK, so why don't we have an hlist_for_each_entry_rcu()???
+ */
+static inline void hlist_add_head_rcu(struct hlist_node *n,
+					struct hlist_head *h)
+{
+	struct hlist_node *first = h->first;
+	n->next = first;
+	n->pprev = &h->first;
+	smp_wmb();
+	if (first)
+		first->pprev = &n->next;
+	h->first = n;
+}
+
+/* next must be != NULL */
+static inline void hlist_add_before(struct hlist_node *n,
+					struct hlist_node *next)
+{
+	n->pprev = next->pprev;
+	n->next = next;
+	next->pprev = &n->next;
+	*(n->pprev) = n;
+}
+
+static inline void hlist_add_after(struct hlist_node *n,
+					struct hlist_node *next)
+{
+	next->next = n->next;
+	n->next = next;
+	next->pprev = &n->next;
+
+	if(next->next)
+		next->next->pprev  = &next->next;
+}
+
+#define hlist_entry(ptr, type, member) container_of(ptr,type,member)
+
+#define hlist_for_each(pos, head) \
+	for (pos = (head)->first; pos && ({ prefetch(pos->next); 1; }); \
+	     pos = pos->next)
+
+#define hlist_for_each_safe(pos, n, head) \
+	for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
+	     pos = n)
+
+/**
+ * hlist_for_each_entry	- iterate over list of given type
+ * @tpos:	the type * to use as a loop counter.
+ * @pos:	the &struct hlist_node to use as a loop counter.
+ * @head:	the head for your list.
+ * @member:	the name of the hlist_node within the struct.
+ */
+#define hlist_for_each_entry(tpos, pos, head, member)			 \
+	for (pos = (head)->first;					 \
+	     pos && ({ prefetch(pos->next); 1;}) &&			 \
+		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
+	     pos = pos->next)
+
+/**
+ * hlist_for_each_entry_continue - iterate over a hlist continuing after existing point
+ * @tpos:	the type * to use as a loop counter.
+ * @pos:	the &struct hlist_node to use as a loop counter.
+ * @member:	the name of the hlist_node within the struct.
+ */
+#define hlist_for_each_entry_continue(tpos, pos, member)		 \
+	for (pos = (pos)->next;						 \
+	     pos && ({ prefetch(pos->next); 1;}) &&			 \
+		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
+	     pos = pos->next)
+
+/**
+ * hlist_for_each_entry_from - iterate over a hlist continuing from existing point
+ * @tpos:	the type * to use as a loop counter.
+ * @pos:	the &struct hlist_node to use as a loop counter.
+ * @member:	the name of the hlist_node within the struct.
+ */
+#define hlist_for_each_entry_from(tpos, pos, member)			 \
+	for (; pos && ({ prefetch(pos->next); 1;}) &&			 \
+		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
+	     pos = pos->next)
+
+/**
+ * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
+ * @tpos:	the type * to use as a loop counter.
+ * @pos:	the &struct hlist_node to use as a loop counter.
+ * @n:		another &struct hlist_node to use as temporary storage
+ * @head:	the head for your list.
+ * @member:	the name of the hlist_node within the struct.
+ */
+#define hlist_for_each_entry_safe(tpos, pos, n, head, member) 		 \
+	for (pos = (head)->first;					 \
+	     pos && ({ n = pos->next; 1; }) && 				 \
+		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
+	     pos = n)
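
The hlist variants trade the two-pointer head for a single pointer, which is why they are the natural fit for hash-table buckets. A small usage sketch with a made-up hash function and bucket count; note that hlist_for_each_entry() takes both a typed cursor and a raw hlist_node cursor:

    #include <stdio.h>
    #include <string.h>
    #include "linux_list.h"

    #define NBUCKETS 8

    struct name_entry {
            const char *name;
            struct hlist_node node;
    };

    static unsigned int toy_hash(const char *s)
    {
            unsigned int h = 0;

            while (*s)
                    h = h * 31 + (unsigned char)*s++;
            return h % NBUCKETS;
    }

    int main(void)
    {
            struct hlist_head table[NBUCKETS];
            struct name_entry in = { "INPUT" }, fwd = { "FORWARD" };
            struct name_entry *e;
            struct hlist_node *pos;
            unsigned int i;

            for (i = 0; i < NBUCKETS; i++)
                    INIT_HLIST_HEAD(&table[i]);

            hlist_add_head(&in.node, &table[toy_hash(in.name)]);
            hlist_add_head(&fwd.node, &table[toy_hash(fwd.name)]);

            hlist_for_each_entry(e, pos, &table[toy_hash("INPUT")], node)
                    if (strcmp(e->name, "INPUT") == 0)
                            printf("found %s\n", e->name);
            return 0;
    }
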
+
+/**
+ * hlist_for_each_entry_rcu - iterate over rcu list of given type
+ * @tpos:	the type * to use as a loop counter.
+ * @pos:	the &struct hlist_node to use as a loop counter.
+ * @head:	the head for your list.
+ * @member:	the name of the hlist_node within the struct.
+ *
+ * This list-traversal primitive may safely run concurrently with
+ * the _rcu list-mutation primitives such as hlist_add_rcu()
+ * as long as the traversal is guarded by rcu_read_lock().
+ */
+#define hlist_for_each_entry_rcu(tpos, pos, head, member)		 \
+	for (pos = (head)->first;					 \
+	     pos && ({ prefetch(pos->next); 1;}) &&			 \
+		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
+	     pos = pos->next, ({ smp_read_barrier_depends(); 0; }) )
+
+#endif
diff --git a/libiptc/linux_stddef.h b/libiptc/linux_stddef.h
new file mode 100644
index 0000000..56416f1
--- /dev/null
+++ b/libiptc/linux_stddef.h
@@ -0,0 +1,39 @@
+#ifndef _LINUX_STDDEF_H
+#define _LINUX_STDDEF_H
+
+#undef NULL
+#if defined(__cplusplus)
+#define NULL 0
+#else
+#define NULL ((void *)0)
+#endif
+
+#undef offsetof
+#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
+
+
+/**
+ * container_of - cast a member of a structure out to the containing structure
+ *
+ * @ptr:	the pointer to the member.
+ * @type:	the type of the container struct this is embedded in.
+ * @member:	the name of the member within the struct.
+ *
+ */
+#define container_of(ptr, type, member) ({			\
+        const typeof( ((type *)0)->member ) *__mptr = (ptr);	\
+        (type *)( (char *)__mptr - offsetof(type,member) );})
+
+/*
+ * Check at compile time that something is of a particular type.
+ * Always evaluates to 1 so you may use it easily in comparisons.
+ */
+#define typecheck(type,x) \
+({	type __dummy; \
+	typeof(x) __dummy2; \
+	(void)(&__dummy == &__dummy2); \
+	1; \
+})
+
+
+#endif