Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

* master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6:
  [NET]: Fix networking compilation errors
  [AF_RXRPC/AFS]: Arch-specific fixes.
  [AFS]: Fix VLocation record update wakeup
  [NET]: Revert sk_buff walker cleanups.
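
Most of the hunks below restore the original sk_buff walker convention: 'start' holds the number of bytes that precede the current area (linear head, page fragment, or frag_list member), 'end' is start plus that area's size, the position inside the area is 'offset - start', and 'start = end' advances the walk; frag_list recursions are re-entered at 'offset - start' rather than 0. As a rough illustration only (not kernel code; toy_buf, toy_frag and toy_copy_bits are invented names for this sketch), the same bookkeeping in a self-contained userspace program:

	/*
	 * Userspace sketch of the walker pattern: 'start' counts the bytes
	 * that precede the current area, 'end' marks where it stops, and
	 * 'offset - start' is the position inside that area.  All names
	 * here (toy_frag, toy_buf, toy_copy_bits) are invented for
	 * illustration and are not kernel API.
	 */
	#include <stdio.h>
	#include <string.h>

	struct toy_frag { const char *data; int size; };

	struct toy_buf {
		const char      *head;      /* "linear" part of the buffer */
		int              headlen;   /* bytes in the linear part    */
		struct toy_frag  frags[4];  /* paged "fragments"           */
		int              nr_frags;
	};

	/* Copy 'len' bytes starting at logical position 'offset' into 'to'. */
	static int toy_copy_bits(const struct toy_buf *b, int offset,
				 char *to, int len)
	{
		int start = b->headlen;               /* like skb_headlen(skb) */
		int i, copy = start - offset;

		if (copy > 0) {                       /* copy from the header area */
			if (copy > len)
				copy = len;
			memcpy(to, b->head + offset, copy);
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to += copy;
		}

		for (i = 0; i < b->nr_frags; i++) {   /* walk the fragments */
			int end = start + b->frags[i].size;

			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				/* 'offset - start' is the offset inside this frag */
				memcpy(to, b->frags[i].data + offset - start, copy);
				if ((len -= copy) == 0)
					return 0;
				offset += copy;
				to += copy;
			}
			start = end;                  /* next area begins here */
		}
		return len ? -1 : 0;                  /* -1: ran off the end */
	}

	int main(void)
	{
		struct toy_buf b = {
			.head = "HEAD", .headlen = 4,
			.frags = { { "FRAG0", 5 }, { "FRAG1", 5 } }, .nr_frags = 2,
		};
		char out[16] = "";

		toy_copy_bits(&b, 2, out, 7);  /* spans the head and frag 0 */
		printf("%s\n", out);           /* prints "ADFRAG0"          */
		return 0;
	}
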
diff --git a/arch/ia64/lib/csum_partial_copy.c b/arch/ia64/lib/csum_partial_copy.c
index 503dfe6..118daf5 100644
--- a/arch/ia64/lib/csum_partial_copy.c
+++ b/arch/ia64/lib/csum_partial_copy.c
@@ -128,6 +128,8 @@
 	return (__force __wsum)result;
 }
 
+EXPORT_SYMBOL(csum_partial_copy_from_user);
+
 __wsum
 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
 {
diff --git a/fs/Kconfig b/fs/Kconfig
index e33c089..a42f767 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -2020,6 +2020,7 @@
 	tristate "Andrew File System support (AFS) (EXPERIMENTAL)"
 	depends on INET && EXPERIMENTAL
 	select AF_RXRPC
+	select KEYS
 	help
 	  If you say Y here, you will get an experimental Andrew File System
 	  driver. It currently only supports unsecured read-only AFS access.
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 6dd3197..34665f7 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -367,7 +367,7 @@
 	u32		time_low;			/* low part of timestamp */
 	u16		time_mid;			/* mid part of timestamp */
 	u16		time_hi_and_version;		/* high part of timestamp and version  */
-#define AFS_UUID_TO_UNIX_TIME	0x01b21dd213814000
+#define AFS_UUID_TO_UNIX_TIME	0x01b21dd213814000ULL
 #define AFS_UUID_TIMEHI_MASK	0x0fff
 #define AFS_UUID_VERSION_TIME	0x1000	/* time-based UUID */
 #define AFS_UUID_VERSION_NAME	0x3000	/* name-based UUID */
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index e7b0473..222c1a3 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -772,7 +772,7 @@
 
 	if (call->offset < count) {
 		if (last) {
-			_leave(" = -EBADMSG [%d < %lu]", call->offset, count);
+			_leave(" = -EBADMSG [%d < %zu]", call->offset, count);
 			return -EBADMSG;
 		}
 		_leave(" = -EAGAIN");
diff --git a/fs/afs/use-rtnetlink.c b/fs/afs/use-rtnetlink.c
index 82f0daa..f8991c7 100644
--- a/fs/afs/use-rtnetlink.c
+++ b/fs/afs/use-rtnetlink.c
@@ -243,7 +243,7 @@
 		desc->datalen = kernel_recvmsg(desc->nlsock, &msg, iov, 1,
 					       desc->datamax, 0);
 		if (desc->datalen < 0) {
-			_leave(" = %ld [recv]", desc->datalen);
+			_leave(" = %zd [recv]", desc->datalen);
 			return desc->datalen;
 		}
 
diff --git a/fs/afs/vlocation.c b/fs/afs/vlocation.c
index 74cce17..6c8e95a 100644
--- a/fs/afs/vlocation.c
+++ b/fs/afs/vlocation.c
@@ -416,8 +416,8 @@
 		goto error_abandon;
 	spin_lock(&vl->lock);
 	vl->state = AFS_VL_VALID;
-	wake_up(&vl->waitq);
 	spin_unlock(&vl->lock);
+	wake_up(&vl->waitq);
 
 	/* schedule for regular updates */
 	afs_vlocation_queue_for_updates(vl);
@@ -442,7 +442,7 @@
 
 		_debug("invalid [state %d]", state);
 
-		if ((state == AFS_VL_NEW || state == AFS_VL_NO_VOLUME)) {
+		if (state == AFS_VL_NEW || state == AFS_VL_NO_VOLUME) {
 			vl->state = AFS_VL_CREATING;
 			spin_unlock(&vl->lock);
 			goto fill_in_record;
@@ -453,11 +453,10 @@
 		_debug("wait");
 
 		spin_unlock(&vl->lock);
-		ret = wait_event_interruptible(
-			vl->waitq,
-			vl->state == AFS_VL_NEW ||
-			vl->state == AFS_VL_VALID ||
-			vl->state == AFS_VL_NO_VOLUME);
+		ret = wait_event_interruptible(vl->waitq,
+					       vl->state == AFS_VL_NEW ||
+					       vl->state == AFS_VL_VALID ||
+					       vl->state == AFS_VL_NO_VOLUME);
 		if (ret < 0)
 			goto error;
 		spin_lock(&vl->lock);
@@ -471,8 +470,8 @@
 error_abandon:
 	spin_lock(&vl->lock);
 	vl->state = AFS_VL_NEW;
-	wake_up(&vl->waitq);
 	spin_unlock(&vl->lock);
+	wake_up(&vl->waitq);
 error:
 	ASSERT(vl != NULL);
 	afs_put_vlocation(vl);
@@ -675,7 +674,6 @@
 	case 0:
 		afs_vlocation_apply_update(vl, &vldb);
 		vl->state = AFS_VL_VALID;
-		wake_up(&vl->waitq);
 		break;
 	case -ENOMEDIUM:
 		vl->state = AFS_VL_VOLUME_DELETED;
@@ -685,6 +683,7 @@
 		break;
 	}
 	spin_unlock(&vl->lock);
+	wake_up(&vl->waitq);
 
 	/* and then reschedule */
 	_debug("reschedule");
diff --git a/include/net/wext.h b/include/net/wext.h
index 5574183..c02b8de 100644
--- a/include/net/wext.h
+++ b/include/net/wext.h
@@ -10,7 +10,7 @@
 extern int wext_handle_ioctl(struct ifreq *ifr, unsigned int cmd,
 			     void __user *arg);
 #else
-static inline int wext_proc_init()
+static inline int wext_proc_init(void)
 {
 	return 0;
 }
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index f5cfde8..1b30331 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -279,6 +279,8 @@
 
 	return ktime_add(kt, tmp);
 }
+
+EXPORT_SYMBOL_GPL(ktime_add_ns);
 # endif /* !CONFIG_KTIME_SCALAR */
 
 /*
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 16eda21..f6a92a0 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -937,11 +937,11 @@
 static unsigned long atalk_sum_skb(const struct sk_buff *skb, int offset,
 				   int len, unsigned long sum)
 {
-	int end = skb_headlen(skb);
+	int start = skb_headlen(skb);
 	int i, copy;
 
 	/* checksum stuff in header space */
-	if ((copy = end - offset) > 0) {
+	if ( (copy = start - offset) > 0) {
 		if (copy > len)
 			copy = len;
 		sum = atalk_sum_partial(skb->data + offset, copy, sum);
@@ -953,9 +953,11 @@
 
 	/* checksum stuff in frags */
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		BUG_TRAP(len >= 0);
+		int end;
 
-		end = offset + skb_shinfo(skb)->frags[i].size;
+		BUG_TRAP(start <= offset + len);
+
+		end = start + skb_shinfo(skb)->frags[i].size;
 		if ((copy = end - offset) > 0) {
 			u8 *vaddr;
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -963,31 +965,36 @@
 			if (copy > len)
 				copy = len;
 			vaddr = kmap_skb_frag(frag);
-			sum = atalk_sum_partial(vaddr + frag->page_offset,
-						copy, sum);
+			sum = atalk_sum_partial(vaddr + frag->page_offset +
+						  offset - start, copy, sum);
 			kunmap_skb_frag(vaddr);
 
 			if (!(len -= copy))
 				return sum;
 			offset += copy;
 		}
+		start = end;
 	}
 
 	if (skb_shinfo(skb)->frag_list) {
 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
 		for (; list; list = list->next) {
-			BUG_TRAP(len >= 0);
+			int end;
 
-			end = offset + list->len;
+			BUG_TRAP(start <= offset + len);
+
+			end = start + list->len;
 			if ((copy = end - offset) > 0) {
 				if (copy > len)
 					copy = len;
-				sum = atalk_sum_skb(list, 0, copy, sum);
+				sum = atalk_sum_skb(list, offset - start,
+						    copy, sum);
 				if ((len -= copy) == 0)
 					return sum;
 				offset += copy;
 			}
+			start = end;
 		}
 	}
 
diff --git a/net/core/datagram.c b/net/core/datagram.c
index e1afa76..cb056f4 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -247,8 +247,8 @@
 int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
 			    struct iovec *to, int len)
 {
-	int end = skb_headlen(skb);
-	int i, copy = end - offset;
+	int start = skb_headlen(skb);
+	int i, copy = start - offset;
 
 	/* Copy header. */
 	if (copy > 0) {
@@ -263,9 +263,11 @@
 
 	/* Copy paged appendix. Hmm... why does this look so complicated? */
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		BUG_TRAP(len >= 0);
+		int end;
 
-		end = offset + skb_shinfo(skb)->frags[i].size;
+		BUG_TRAP(start <= offset + len);
+
+		end = start + skb_shinfo(skb)->frags[i].size;
 		if ((copy = end - offset) > 0) {
 			int err;
 			u8  *vaddr;
@@ -275,8 +277,8 @@
 			if (copy > len)
 				copy = len;
 			vaddr = kmap(page);
-			err = memcpy_toiovec(to, vaddr + frag->page_offset,
-					     copy);
+			err = memcpy_toiovec(to, vaddr + frag->page_offset +
+					     offset - start, copy);
 			kunmap(page);
 			if (err)
 				goto fault;
@@ -284,24 +286,30 @@
 				return 0;
 			offset += copy;
 		}
+		start = end;
 	}
 
 	if (skb_shinfo(skb)->frag_list) {
 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
 		for (; list; list = list->next) {
-			BUG_TRAP(len >= 0);
+			int end;
 
-			end = offset + list->len;
+			BUG_TRAP(start <= offset + len);
+
+			end = start + list->len;
 			if ((copy = end - offset) > 0) {
 				if (copy > len)
 					copy = len;
-				if (skb_copy_datagram_iovec(list, 0, to, copy))
+				if (skb_copy_datagram_iovec(list,
+							    offset - start,
+							    to, copy))
 					goto fault;
 				if ((len -= copy) == 0)
 					return 0;
 				offset += copy;
 			}
+			start = end;
 		}
 	}
 	if (!len)
@@ -315,9 +323,9 @@
 				      u8 __user *to, int len,
 				      __wsum *csump)
 {
-	int end = skb_headlen(skb);
+	int start = skb_headlen(skb);
 	int pos = 0;
-	int i, copy = end - offset;
+	int i, copy = start - offset;
 
 	/* Copy header. */
 	if (copy > 0) {
@@ -336,9 +344,11 @@
 	}
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		BUG_TRAP(len >= 0);
+		int end;
 
-		end = offset + skb_shinfo(skb)->frags[i].size;
+		BUG_TRAP(start <= offset + len);
+
+		end = start + skb_shinfo(skb)->frags[i].size;
 		if ((copy = end - offset) > 0) {
 			__wsum csum2;
 			int err = 0;
@@ -350,7 +360,8 @@
 				copy = len;
 			vaddr = kmap(page);
 			csum2 = csum_and_copy_to_user(vaddr +
-							frag->page_offset,
+							frag->page_offset +
+							offset - start,
 						      to, copy, 0, &err);
 			kunmap(page);
 			if (err)
@@ -362,20 +373,24 @@
 			to += copy;
 			pos += copy;
 		}
+		start = end;
 	}
 
 	if (skb_shinfo(skb)->frag_list) {
 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
 		for (; list; list=list->next) {
-			BUG_TRAP(len >= 0);
+			int end;
 
-			end = offset + list->len;
+			BUG_TRAP(start <= offset + len);
+
+			end = start + list->len;
 			if ((copy = end - offset) > 0) {
 				__wsum csum2 = 0;
 				if (copy > len)
 					copy = len;
-				if (skb_copy_and_csum_datagram(list, 0,
+				if (skb_copy_and_csum_datagram(list,
+							       offset - start,
 							       to, copy,
 							       &csum2))
 					goto fault;
@@ -386,6 +401,7 @@
 				to += copy;
 				pos += copy;
 			}
+			start = end;
 		}
 	}
 	if (!len)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 32f087b..1422573 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1045,13 +1045,13 @@
 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
 {
 	int i, copy;
-	int end = skb_headlen(skb);
+	int start = skb_headlen(skb);
 
 	if (offset > (int)skb->len - len)
 		goto fault;
 
 	/* Copy header. */
-	if ((copy = end - offset) > 0) {
+	if ((copy = start - offset) > 0) {
 		if (copy > len)
 			copy = len;
 		skb_copy_from_linear_data_offset(skb, offset, to, copy);
@@ -1062,9 +1062,11 @@
 	}
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		BUG_TRAP(len >= 0);
+		int end;
 
-		end = offset + skb_shinfo(skb)->frags[i].size;
+		BUG_TRAP(start <= offset + len);
+
+		end = start + skb_shinfo(skb)->frags[i].size;
 		if ((copy = end - offset) > 0) {
 			u8 *vaddr;
 
@@ -1073,8 +1075,8 @@
 
 			vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
 			memcpy(to,
-			       vaddr + skb_shinfo(skb)->frags[i].page_offset,
-			       copy);
+			       vaddr + skb_shinfo(skb)->frags[i].page_offset+
+			       offset - start, copy);
 			kunmap_skb_frag(vaddr);
 
 			if ((len -= copy) == 0)
@@ -1082,25 +1084,30 @@
 			offset += copy;
 			to     += copy;
 		}
+		start = end;
 	}
 
 	if (skb_shinfo(skb)->frag_list) {
 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
 		for (; list; list = list->next) {
-			BUG_TRAP(len >= 0);
+			int end;
 
-			end = offset + list->len;
+			BUG_TRAP(start <= offset + len);
+
+			end = start + list->len;
 			if ((copy = end - offset) > 0) {
 				if (copy > len)
 					copy = len;
-				if (skb_copy_bits(list, 0, to, copy))
+				if (skb_copy_bits(list, offset - start,
+						  to, copy))
 					goto fault;
 				if ((len -= copy) == 0)
 					return 0;
 				offset += copy;
 				to     += copy;
 			}
+			start = end;
 		}
 	}
 	if (!len)
@@ -1125,12 +1132,12 @@
 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
 {
 	int i, copy;
-	int end = skb_headlen(skb);
+	int start = skb_headlen(skb);
 
 	if (offset > (int)skb->len - len)
 		goto fault;
 
-	if ((copy = end - offset) > 0) {
+	if ((copy = start - offset) > 0) {
 		if (copy > len)
 			copy = len;
 		skb_copy_to_linear_data_offset(skb, offset, from, copy);
@@ -1142,9 +1149,11 @@
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-		BUG_TRAP(len >= 0);
+		int end;
 
-		end = offset + frag->size;
+		BUG_TRAP(start <= offset + len);
+
+		end = start + frag->size;
 		if ((copy = end - offset) > 0) {
 			u8 *vaddr;
 
@@ -1152,7 +1161,8 @@
 				copy = len;
 
 			vaddr = kmap_skb_frag(frag);
-			memcpy(vaddr + frag->page_offset, from, copy);
+			memcpy(vaddr + frag->page_offset + offset - start,
+			       from, copy);
 			kunmap_skb_frag(vaddr);
 
 			if ((len -= copy) == 0)
@@ -1160,25 +1170,30 @@
 			offset += copy;
 			from += copy;
 		}
+		start = end;
 	}
 
 	if (skb_shinfo(skb)->frag_list) {
 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
 		for (; list; list = list->next) {
-			BUG_TRAP(len >= 0);
+			int end;
 
-			end = offset + list->len;
+			BUG_TRAP(start <= offset + len);
+
+			end = start + list->len;
 			if ((copy = end - offset) > 0) {
 				if (copy > len)
 					copy = len;
-				if (skb_store_bits(list, 0, from, copy))
+				if (skb_store_bits(list, offset - start,
+						   from, copy))
 					goto fault;
 				if ((len -= copy) == 0)
 					return 0;
 				offset += copy;
 				from += copy;
 			}
+			start = end;
 		}
 	}
 	if (!len)
@@ -1195,8 +1210,8 @@
 __wsum skb_checksum(const struct sk_buff *skb, int offset,
 			  int len, __wsum csum)
 {
-	int end = skb_headlen(skb);
-	int i, copy = end - offset;
+	int start = skb_headlen(skb);
+	int i, copy = start - offset;
 	int pos = 0;
 
 	/* Checksum header. */
@@ -1211,9 +1226,11 @@
 	}
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		BUG_TRAP(len >= 0);
+		int end;
 
-		end = offset + skb_shinfo(skb)->frags[i].size;
+		BUG_TRAP(start <= offset + len);
+
+		end = start + skb_shinfo(skb)->frags[i].size;
 		if ((copy = end - offset) > 0) {
 			__wsum csum2;
 			u8 *vaddr;
@@ -1222,8 +1239,8 @@
 			if (copy > len)
 				copy = len;
 			vaddr = kmap_skb_frag(frag);
-			csum2 = csum_partial(vaddr + frag->page_offset,
-					     copy, 0);
+			csum2 = csum_partial(vaddr + frag->page_offset +
+					     offset - start, copy, 0);
 			kunmap_skb_frag(vaddr);
 			csum = csum_block_add(csum, csum2, pos);
 			if (!(len -= copy))
@@ -1231,26 +1248,31 @@
 			offset += copy;
 			pos    += copy;
 		}
+		start = end;
 	}
 
 	if (skb_shinfo(skb)->frag_list) {
 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
 		for (; list; list = list->next) {
-			BUG_TRAP(len >= 0);
+			int end;
 
-			end = offset + list->len;
+			BUG_TRAP(start <= offset + len);
+
+			end = start + list->len;
 			if ((copy = end - offset) > 0) {
 				__wsum csum2;
 				if (copy > len)
 					copy = len;
-				csum2 = skb_checksum(list, 0, copy, 0);
+				csum2 = skb_checksum(list, offset - start,
+						     copy, 0);
 				csum = csum_block_add(csum, csum2, pos);
 				if ((len -= copy) == 0)
 					return csum;
 				offset += copy;
 				pos    += copy;
 			}
+			start = end;
 		}
 	}
 	BUG_ON(len);
@@ -1263,8 +1285,8 @@
 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
 				    u8 *to, int len, __wsum csum)
 {
-	int end = skb_headlen(skb);
-	int i, copy = end - offset;
+	int start = skb_headlen(skb);
+	int i, copy = start - offset;
 	int pos = 0;
 
 	/* Copy header. */
@@ -1281,9 +1303,11 @@
 	}
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		BUG_TRAP(len >= 0);
+		int end;
 
-		end = offset + skb_shinfo(skb)->frags[i].size;
+		BUG_TRAP(start <= offset + len);
+
+		end = start + skb_shinfo(skb)->frags[i].size;
 		if ((copy = end - offset) > 0) {
 			__wsum csum2;
 			u8 *vaddr;
@@ -1293,8 +1317,9 @@
 				copy = len;
 			vaddr = kmap_skb_frag(frag);
 			csum2 = csum_partial_copy_nocheck(vaddr +
-							  frag->page_offset,
-							  to, copy, 0);
+							  frag->page_offset +
+							  offset - start, to,
+							  copy, 0);
 			kunmap_skb_frag(vaddr);
 			csum = csum_block_add(csum, csum2, pos);
 			if (!(len -= copy))
@@ -1303,6 +1328,7 @@
 			to     += copy;
 			pos    += copy;
 		}
+		start = end;
 	}
 
 	if (skb_shinfo(skb)->frag_list) {
@@ -1310,13 +1336,16 @@
 
 		for (; list; list = list->next) {
 			__wsum csum2;
-			BUG_TRAP(len >= 0);
+			int end;
 
-			end = offset + list->len;
+			BUG_TRAP(start <= offset + len);
+
+			end = start + list->len;
 			if ((copy = end - offset) > 0) {
 				if (copy > len)
 					copy = len;
-				csum2 = skb_copy_and_csum_bits(list, 0,
+				csum2 = skb_copy_and_csum_bits(list,
+							       offset - start,
 							       to, copy, 0);
 				csum = csum_block_add(csum, csum2, pos);
 				if ((len -= copy) == 0)
@@ -1325,6 +1354,7 @@
 				to     += copy;
 				pos    += copy;
 			}
+			start = end;
 		}
 	}
 	BUG_ON(len);
@@ -1996,8 +2026,8 @@
 int
 skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
 {
-	int end = skb_headlen(skb);
-	int i, copy = end - offset;
+	int start = skb_headlen(skb);
+	int i, copy = start - offset;
 	int elt = 0;
 
 	if (copy > 0) {
@@ -2013,39 +2043,45 @@
 	}
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		BUG_TRAP(len >= 0);
+		int end;
 
-		end = offset + skb_shinfo(skb)->frags[i].size;
+		BUG_TRAP(start <= offset + len);
+
+		end = start + skb_shinfo(skb)->frags[i].size;
 		if ((copy = end - offset) > 0) {
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
 			if (copy > len)
 				copy = len;
 			sg[elt].page = frag->page;
-			sg[elt].offset = frag->page_offset;
+			sg[elt].offset = frag->page_offset+offset-start;
 			sg[elt].length = copy;
 			elt++;
 			if (!(len -= copy))
 				return elt;
 			offset += copy;
 		}
+		start = end;
 	}
 
 	if (skb_shinfo(skb)->frag_list) {
 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
 		for (; list; list = list->next) {
-			BUG_TRAP(len >= 0);
+			int end;
 
-			end = offset + list->len;
+			BUG_TRAP(start <= offset + len);
+
+			end = start + list->len;
 			if ((copy = end - offset) > 0) {
 				if (copy > len)
 					copy = len;
-				elt += skb_to_sgvec(list, sg+elt, 0, copy);
+				elt += skb_to_sgvec(list, sg+elt, offset - start, copy);
 				if ((len -= copy) == 0)
 					return elt;
 				offset += copy;
 			}
+			start = end;
 		}
 	}
 	BUG_ON(len);
diff --git a/net/core/user_dma.c b/net/core/user_dma.c
index 89241cd..0ad1cd5 100644
--- a/net/core/user_dma.c
+++ b/net/core/user_dma.c
@@ -49,8 +49,8 @@
 			struct sk_buff *skb, int offset, struct iovec *to,
 			size_t len, struct dma_pinned_list *pinned_list)
 {
-	int end = skb_headlen(skb);
-	int i, copy = end - offset;
+	int start = skb_headlen(skb);
+	int i, copy = start - offset;
 	dma_cookie_t cookie = 0;
 
 	/* Copy header. */
@@ -69,9 +69,11 @@
 
 	/* Copy paged appendix. Hmm... why does this look so complicated? */
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		BUG_TRAP(len >= 0);
+		int end;
 
-		end = offset + skb_shinfo(skb)->frags[i].size;
+		BUG_TRAP(start <= offset + len);
+
+		end = start + skb_shinfo(skb)->frags[i].size;
 		copy = end - offset;
 		if ((copy = end - offset) > 0) {
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -80,8 +82,8 @@
 			if (copy > len)
 				copy = len;
 
-			cookie = dma_memcpy_pg_to_iovec(chan, to, pinned_list,
-					page, frag->page_offset, copy);
+			cookie = dma_memcpy_pg_to_iovec(chan, to, pinned_list, page,
+					frag->page_offset + offset - start, copy);
 			if (cookie < 0)
 				goto fault;
 			len -= copy;
@@ -89,21 +91,25 @@
 				goto end;
 			offset += copy;
 		}
+		start = end;
 	}
 
 	if (skb_shinfo(skb)->frag_list) {
 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
 		for (; list; list = list->next) {
-			BUG_TRAP(len >= 0);
+			int end;
 
-			end = offset + list->len;
+			BUG_TRAP(start <= offset + len);
+
+			end = start + list->len;
 			copy = end - offset;
 			if (copy > 0) {
 				if (copy > len)
 					copy = len;
 				cookie = dma_skb_copy_datagram_iovec(chan, list,
-						0, to, copy, pinned_list);
+						offset - start, to, copy,
+						pinned_list);
 				if (cookie < 0)
 					goto fault;
 				len -= copy;
@@ -111,6 +117,7 @@
 					goto end;
 				offset += copy;
 			}
+			start = end;
 		}
 	}
 
diff --git a/net/rxrpc/Kconfig b/net/rxrpc/Kconfig
index d72380e..8750f6d 100644
--- a/net/rxrpc/Kconfig
+++ b/net/rxrpc/Kconfig
@@ -30,6 +30,11 @@
 config RXKAD
 	tristate "RxRPC Kerberos security"
 	depends on AF_RXRPC && KEYS
+	select CRYPTO
+	select CRYPTO_MANAGER
+	select CRYPTO_BLKCIPHER
+	select CRYPTO_PCBC
+	select CRYPTO_FCRYPT
 	help
 	  Provide kerberos 4 and AFS kaserver security handling for AF_RXRPC
 	  through the use of the key retention service.
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index 1eaf529..5ec7051 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -18,6 +18,7 @@
 #include <linux/ctype.h>
 #include <net/sock.h>
 #include <net/af_rxrpc.h>
+#define rxrpc_debug rxkad_debug
 #include "ar-internal.h"
 
 #define RXKAD_VERSION			2
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c
index be529c4..6249a94 100644
--- a/net/xfrm/xfrm_algo.c
+++ b/net/xfrm/xfrm_algo.c
@@ -532,8 +532,8 @@
 int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc,
 		 int offset, int len, icv_update_fn_t icv_update)
 {
-	int end = skb_headlen(skb);
-	int i, copy = end - offset;
+	int start = skb_headlen(skb);
+	int i, copy = start - offset;
 	int err;
 	struct scatterlist sg;
 
@@ -556,9 +556,11 @@
 	}
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		BUG_TRAP(len >= 0);
+		int end;
 
-		end = offset + skb_shinfo(skb)->frags[i].size;
+		BUG_TRAP(start <= offset + len);
+
+		end = start + skb_shinfo(skb)->frags[i].size;
 		if ((copy = end - offset) > 0) {
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
@@ -566,7 +568,7 @@
 				copy = len;
 
 			sg.page = frag->page;
-			sg.offset = frag->page_offset;
+			sg.offset = frag->page_offset + offset-start;
 			sg.length = copy;
 
 			err = icv_update(desc, &sg, copy);
@@ -577,19 +579,22 @@
 				return 0;
 			offset += copy;
 		}
+		start = end;
 	}
 
 	if (skb_shinfo(skb)->frag_list) {
 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
 
 		for (; list; list = list->next) {
-			BUG_TRAP(len >= 0);
+			int end;
 
-			end = offset + list->len;
+			BUG_TRAP(start <= offset + len);
+
+			end = start + list->len;
 			if ((copy = end - offset) > 0) {
 				if (copy > len)
 					copy = len;
-				err = skb_icv_walk(list, desc, 0,
+				err = skb_icv_walk(list, desc, offset-start,
 						   copy, icv_update);
 				if (unlikely(err))
 					return err;
@@ -597,6 +602,7 @@
 					return 0;
 				offset += copy;
 			}
+			start = end;
 		}
 	}
 	BUG_ON(len);