Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

Pulls in the conversion of the IPv4 and IPv6 fragment-queue hash chains
(struct ipq in net/ipv4/ip_fragment.c, struct frag_queue in
net/ipv6/reassembly.c) from open-coded next/pprev pointers to hlist,
plus a fix so that the assertion in nf_ct_unlink_expect() tests the
expectation's own timer, exp->timeout, instead of exp_timeout.
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index e7d26d9..8ce0ce2 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -71,7 +71,7 @@
 
 /* Describe an entry in the "incomplete datagrams" queue. */
 struct ipq {
-	struct ipq	*next;		/* linked list pointers			*/
+	struct hlist_node list;
 	struct list_head lru_list;	/* lru list member 			*/
 	u32		user;
 	u32		saddr;
@@ -89,7 +89,6 @@
 	spinlock_t	lock;
 	atomic_t	refcnt;
 	struct timer_list timer;	/* when will this queue expire?		*/
-	struct ipq	**pprev;
 	int		iif;
 	struct timeval	stamp;
 };
@@ -99,7 +98,7 @@
 #define IPQ_HASHSZ	64
 
 /* Per-bucket lock is easy to add now. */
-static struct ipq *ipq_hash[IPQ_HASHSZ];
+static struct hlist_head ipq_hash[IPQ_HASHSZ];
 static DEFINE_RWLOCK(ipfrag_lock);
 static u32 ipfrag_hash_rnd;
 static LIST_HEAD(ipq_lru_list);
@@ -107,9 +106,7 @@
 
 static __inline__ void __ipq_unlink(struct ipq *qp)
 {
-	if(qp->next)
-		qp->next->pprev = qp->pprev;
-	*qp->pprev = qp->next;
+	hlist_del(&qp->list);
 	list_del(&qp->lru_list);
 	ip_frag_nqueues--;
 }
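
With struct ipq now embedding a struct hlist_node and the bucket array holding
struct hlist_head, __ipq_unlink() collapses to hlist_del(): the node's pprev
back-pointer lets an entry be removed without knowing which bucket it hangs
off.  As a point of reference only, here is a minimal userspace approximation
of that pattern; hnode, hhead, entry and the helpers are names invented for
this sketch (it is not the kernel's <linux/list.h>), and locking is ignored.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Minimal userspace stand-ins for the hlist primitives. */
struct hnode { struct hnode *next, **pprev; };
struct hhead { struct hnode *first; };

static void hadd_head(struct hnode *n, struct hhead *h)
{
	n->next = h->first;
	if (h->first)
		h->first->pprev = &n->next;
	h->first = n;
	n->pprev = &h->first;
}

static void hdel(struct hnode *n)
{
	*n->pprev = n->next;
	if (n->next)
		n->next->pprev = n->pprev;
}

/* An entry embeds the node, the way struct ipq embeds hlist_node. */
struct entry {
	struct hnode list;
	unsigned int id;
};

#define entry_of(ptr) ((struct entry *)((char *)(ptr) - offsetof(struct entry, list)))

int main(void)
{
	struct hhead bucket = { NULL };
	struct entry a = { .id = 1 }, b = { .id = 2 };
	struct hnode *n;

	hadd_head(&a.list, &bucket);
	hadd_head(&b.list, &bucket);

	for (n = bucket.first; n; n = n->next)
		printf("id=%u\n", entry_of(n)->id);

	hdel(&a.list);		/* unlink without touching the bucket head */
	assert(bucket.first == &b.list && b.list.next == NULL);
	return 0;
}
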
@@ -139,27 +136,18 @@
 	get_random_bytes(&ipfrag_hash_rnd, sizeof(u32));
 	for (i = 0; i < IPQ_HASHSZ; i++) {
 		struct ipq *q;
+		struct hlist_node *p, *n;
 
-		q = ipq_hash[i];
-		while (q) {
-			struct ipq *next = q->next;
+		hlist_for_each_entry_safe(q, p, n, &ipq_hash[i], list) {
 			unsigned int hval = ipqhashfn(q->id, q->saddr,
 						      q->daddr, q->protocol);
 
 			if (hval != i) {
-				/* Unlink. */
-				if (q->next)
-					q->next->pprev = q->pprev;
-				*q->pprev = q->next;
+				hlist_del(&q->list);
 
 				/* Relink to new hash chain. */
-				if ((q->next = ipq_hash[hval]) != NULL)
-					q->next->pprev = &q->next;
-				ipq_hash[hval] = q;
-				q->pprev = &ipq_hash[hval];
+				hlist_add_head(&q->list, &ipq_hash[hval]);
 			}
-
-			q = next;
 		}
 	}
 	write_unlock(&ipfrag_lock);
@@ -310,14 +298,16 @@
 static struct ipq *ip_frag_intern(unsigned int hash, struct ipq *qp_in)
 {
 	struct ipq *qp;
-
+#ifdef CONFIG_SMP
+	struct hlist_node *n;
+#endif
 	write_lock(&ipfrag_lock);
 #ifdef CONFIG_SMP
 	/* With SMP race we have to recheck hash table, because
 	 * such entry could be created on other cpu, while we
 	 * promoted read lock to write lock.
 	 */
-	for(qp = ipq_hash[hash]; qp; qp = qp->next) {
+	hlist_for_each_entry(qp, n, &ipq_hash[hash], list) {
 		if(qp->id == qp_in->id		&&
 		   qp->saddr == qp_in->saddr	&&
 		   qp->daddr == qp_in->daddr	&&
@@ -337,10 +327,7 @@
 		atomic_inc(&qp->refcnt);
 
 	atomic_inc(&qp->refcnt);
-	if((qp->next = ipq_hash[hash]) != NULL)
-		qp->next->pprev = &qp->next;
-	ipq_hash[hash] = qp;
-	qp->pprev = &ipq_hash[hash];
+	hlist_add_head(&qp->list, &ipq_hash[hash]);
 	INIT_LIST_HEAD(&qp->lru_list);
 	list_add_tail(&qp->lru_list, &ipq_lru_list);
 	ip_frag_nqueues++;
@@ -392,9 +379,10 @@
 	__u8 protocol = iph->protocol;
 	unsigned int hash = ipqhashfn(id, saddr, daddr, protocol);
 	struct ipq *qp;
+	struct hlist_node *n;
 
 	read_lock(&ipfrag_lock);
-	for(qp = ipq_hash[hash]; qp; qp = qp->next) {
+	hlist_for_each_entry(qp, n, &ipq_hash[hash], list) {
 		if(qp->id == id		&&
 		   qp->saddr == saddr	&&
 		   qp->daddr == daddr	&&
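
The ip_frag_intern() hunk keeps the existing locking scheme: the lookup path
searches under the read lock, and ip_frag_intern() re-searches the chain under
the write lock because, as the in-code comment notes, the same queue may have
been created on another CPU after the read lock was dropped.  The sketch below
is a hedged userspace illustration of that lookup/recheck/insert shape using a
pthreads rwlock and a single bucket; get_entry(), find_locked() and struct
entry are invented for the example and only loosely mirror the kernel code.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy table: one bucket, entries keyed by id. */
struct entry {
	struct entry *next;
	unsigned int id;
	atomic_int refcnt;
};

static struct entry *bucket;
static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;

static struct entry *find_locked(unsigned int id)
{
	struct entry *e;

	for (e = bucket; e; e = e->next)
		if (e->id == id)
			return e;
	return NULL;
}

/*
 * Look up under the read lock; if that misses, allocate a candidate and
 * recheck under the write lock, since another thread may have inserted
 * the same key while no lock was held.
 */
static struct entry *get_entry(unsigned int id)
{
	struct entry *e, *new;

	pthread_rwlock_rdlock(&table_lock);
	e = find_locked(id);
	if (e)
		atomic_fetch_add(&e->refcnt, 1);
	pthread_rwlock_unlock(&table_lock);
	if (e)
		return e;

	new = calloc(1, sizeof(*new));
	if (!new)
		abort();
	new->id = id;
	atomic_init(&new->refcnt, 1);

	pthread_rwlock_wrlock(&table_lock);
	e = find_locked(id);		/* recheck: lost the race? */
	if (e) {
		atomic_fetch_add(&e->refcnt, 1);
		pthread_rwlock_unlock(&table_lock);
		free(new);		/* discard our duplicate */
		return e;
	}
	new->next = bucket;
	bucket = new;
	pthread_rwlock_unlock(&table_lock);
	return new;
}

int main(void)
{
	struct entry *a = get_entry(42);
	struct entry *b = get_entry(42);

	printf("same entry: %s, refcnt=%d\n",
	       a == b ? "yes" : "no", atomic_load(&a->refcnt));
	return 0;
}
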
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index e4fe9ee..5d316cb 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -74,7 +74,7 @@
 
 struct frag_queue
 {
-	struct frag_queue	*next;
+	struct hlist_node	list;
 	struct list_head lru_list;		/* lru list member	*/
 
 	__u32			id;		/* fragment id		*/
@@ -95,14 +95,13 @@
 #define FIRST_IN		2
 #define LAST_IN			1
 	__u16			nhoffset;
-	struct frag_queue	**pprev;
 };
 
 /* Hash table. */
 
 #define IP6Q_HASHSZ	64
 
-static struct frag_queue *ip6_frag_hash[IP6Q_HASHSZ];
+static struct hlist_head ip6_frag_hash[IP6Q_HASHSZ];
 static DEFINE_RWLOCK(ip6_frag_lock);
 static u32 ip6_frag_hash_rnd;
 static LIST_HEAD(ip6_frag_lru_list);
@@ -110,9 +109,7 @@
 
 static __inline__ void __fq_unlink(struct frag_queue *fq)
 {
-	if(fq->next)
-		fq->next->pprev = fq->pprev;
-	*fq->pprev = fq->next;
+	hlist_del(&fq->list);
 	list_del(&fq->lru_list);
 	ip6_frag_nqueues--;
 }
@@ -163,28 +160,20 @@
 	get_random_bytes(&ip6_frag_hash_rnd, sizeof(u32));
 	for (i = 0; i < IP6Q_HASHSZ; i++) {
 		struct frag_queue *q;
+		struct hlist_node *p, *n;
 
-		q = ip6_frag_hash[i];
-		while (q) {
-			struct frag_queue *next = q->next;
+		hlist_for_each_entry_safe(q, p, n, &ip6_frag_hash[i], list) {
 			unsigned int hval = ip6qhashfn(q->id,
 						       &q->saddr,
 						       &q->daddr);
 
 			if (hval != i) {
-				/* Unlink. */
-				if (q->next)
-					q->next->pprev = q->pprev;
-				*q->pprev = q->next;
+				hlist_del(&q->list);
 
 				/* Relink to new hash chain. */
-				if ((q->next = ip6_frag_hash[hval]) != NULL)
-					q->next->pprev = &q->next;
-				ip6_frag_hash[hval] = q;
-				q->pprev = &ip6_frag_hash[hval];
-			}
-
-			q = next;
+				hlist_add_head(&q->list,
+					       &ip6_frag_hash[hval]);
+			}
 		}
 	}
 	write_unlock(&ip6_frag_lock);
@@ -337,10 +327,13 @@
 					  struct frag_queue *fq_in)
 {
 	struct frag_queue *fq;
+#ifdef CONFIG_SMP
+	struct hlist_node *n;
+#endif
 
 	write_lock(&ip6_frag_lock);
 #ifdef CONFIG_SMP
-	for (fq = ip6_frag_hash[hash]; fq; fq = fq->next) {
+	hlist_for_each_entry(fq, n, &ip6_frag_hash[hash], list) {
 		if (fq->id == fq_in->id && 
 		    ipv6_addr_equal(&fq_in->saddr, &fq->saddr) &&
 		    ipv6_addr_equal(&fq_in->daddr, &fq->daddr)) {
@@ -358,10 +351,7 @@
 		atomic_inc(&fq->refcnt);
 
 	atomic_inc(&fq->refcnt);
-	if((fq->next = ip6_frag_hash[hash]) != NULL)
-		fq->next->pprev = &fq->next;
-	ip6_frag_hash[hash] = fq;
-	fq->pprev = &ip6_frag_hash[hash];
+	hlist_add_head(&fq->list, &ip6_frag_hash[hash]);
 	INIT_LIST_HEAD(&fq->lru_list);
 	list_add_tail(&fq->lru_list, &ip6_frag_lru_list);
 	ip6_frag_nqueues++;
@@ -401,10 +391,11 @@
 fq_find(u32 id, struct in6_addr *src, struct in6_addr *dst)
 {
 	struct frag_queue *fq;
+	struct hlist_node *n;
 	unsigned int hash = ip6qhashfn(id, src, dst);
 
 	read_lock(&ip6_frag_lock);
-	for(fq = ip6_frag_hash[hash]; fq; fq = fq->next) {
+	hlist_for_each_entry(fq, n, &ip6_frag_hash[hash], list) {
 		if (fq->id == id && 
 		    ipv6_addr_equal(src, &fq->saddr) &&
 		    ipv6_addr_equal(dst, &fq->daddr)) {
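
The secret-rebuild hunks in both files do the same thing: pick a fresh random
seed, walk every bucket, and move any queue whose keyed hash now lands in a
different bucket, which is why the patch uses hlist_for_each_entry_safe, the
variant that tolerates the current node being relinked mid-walk.  The sketch
below shows the same rehash idea in plain, runnable C; it deliberately
simplifies to singly linked buckets with no locking or LRU bookkeeping, and
hashfn() is a toy keyed hash standing in for ipqhashfn()/ip6qhashfn().

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define NBUCKETS 64

struct frag {
	struct frag *next;
	uint32_t id;
};

static struct frag *buckets[NBUCKETS];
static uint32_t hash_rnd;

/* Toy keyed hash: any hash that mixes in the secret would do. */
static unsigned int hashfn(uint32_t id)
{
	return (id ^ hash_rnd) * 2654435761u % NBUCKETS;
}

static void add_head(struct frag *f, unsigned int h)
{
	f->next = buckets[h];
	buckets[h] = f;
}

/*
 * Re-seed the hash and move every entry whose bucket changed, as the
 * secret-rebuild handlers in ip_fragment.c/reassembly.c do.  The "safe"
 * part is simply not depending on the node's links while relinking it.
 */
static void secret_rebuild(void)
{
	unsigned int i;

	hash_rnd = (uint32_t)rand();
	for (i = 0; i < NBUCKETS; i++) {
		struct frag **pp = &buckets[i];

		while (*pp) {
			struct frag *f = *pp;
			unsigned int hval = hashfn(f->id);

			if (hval != i) {
				*pp = f->next;		/* unlink */
				add_head(f, hval);	/* relink */
			} else {
				pp = &f->next;
			}
		}
	}
}

int main(void)
{
	struct frag f1 = { .id = 100 }, f2 = { .id = 200 };
	unsigned int i, count = 0;
	struct frag *f;

	add_head(&f1, hashfn(f1.id));
	add_head(&f2, hashfn(f2.id));
	secret_rebuild();

	for (i = 0; i < NBUCKETS; i++)
		for (f = buckets[i]; f; f = f->next)
			count++;
	printf("%u entries survived the rehash\n", count);
	return count == 2 ? 0 : 1;
}
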
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index ea094b2..1da6783 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -387,7 +387,7 @@
 static void nf_ct_unlink_expect(struct nf_conntrack_expect *exp)
 {
 	ASSERT_WRITE_LOCK(&nf_conntrack_lock);
-	NF_CT_ASSERT(!timer_pending(&exp_timeout));
+	NF_CT_ASSERT(!timer_pending(&exp->timeout));
 	list_del(&exp->list);
 	NF_CT_STAT_INC(expect_delete);
 	exp->master->expecting--;
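
The nf_conntrack hunk is a small correctness fix: the assertion in
nf_ct_unlink_expect() must test the timer embedded in the expectation being
unlinked, exp->timeout, not an unrelated exp_timeout identifier.  Purely as an
illustration of that invariant, the sketch below uses assert() in place of
NF_CT_ASSERT() and a plain flag in place of timer_pending(); every name in it
is invented for the example.

#include <assert.h>
#include <stddef.h>

/* Illustrative stand-ins; not the nf_conntrack structures. */
struct fake_timer { int pending; };
int global_timer_pending;	/* an unrelated symbol that would also compile */

struct expectation {
	struct expectation *next, *prev;	/* doubly linked for easy unlink */
	struct fake_timer timeout;		/* per-object timer, like exp->timeout */
};

static void unlink_expect(struct expectation *exp)
{
	/*
	 * The invariant the assertion encodes: by the time the expectation
	 * is dropped from the list, its *own* timer must already have been
	 * stopped.  Checking some other symbol (the bug being fixed) would
	 * let a still-armed timer fire on an already-unlinked object.
	 */
	assert(!exp->timeout.pending);

	if (exp->next)
		exp->next->prev = exp->prev;
	if (exp->prev)
		exp->prev->next = exp->next;
	exp->next = exp->prev = NULL;
}

int main(void)
{
	struct expectation a = { .timeout = { .pending = 0 } };

	global_timer_pending = 1;	/* irrelevant to a's own state */
	unlink_expect(&a);		/* passes: a's own timer is idle */
	return 0;
}
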