Linux-2.6.12-rc2

Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.

Let it rip!

diff --git a/arch/sparc64/lib/Makefile b/arch/sparc64/lib/Makefile
new file mode 100644
index 0000000..40dbeec
--- /dev/null
+++ b/arch/sparc64/lib/Makefile
@@ -0,0 +1,20 @@
+# $Id: Makefile,v 1.25 2000/12/14 22:57:25 davem Exp $
+# Makefile for Sparc64 library files.
+#
+
+EXTRA_AFLAGS := -ansi
+EXTRA_CFLAGS := -Werror
+
+lib-y := PeeCeeI.o copy_page.o clear_page.o strlen.o strncmp.o \
+	 memscan.o strncpy_from_user.o strlen_user.o memcmp.o checksum.o \
+	 bzero.o csum_copy.o csum_copy_from_user.o csum_copy_to_user.o \
+	 VISsave.o atomic.o bitops.o \
+	 U1memcpy.o U1copy_from_user.o U1copy_to_user.o \
+	 U3memcpy.o U3copy_from_user.o U3copy_to_user.o U3patch.o \
+	 copy_in_user.o user_fixup.o memmove.o \
+	 mcount.o ipcsum.o rwsem.o xor.o find_bit.o delay.o
+
+lib-$(CONFIG_DEBUG_SPINLOCK) += debuglocks.o
+lib-$(CONFIG_HAVE_DEC_LOCK) += dec_and_lock.o
+
+obj-y += iomap.o
diff --git a/arch/sparc64/lib/PeeCeeI.c b/arch/sparc64/lib/PeeCeeI.c
new file mode 100644
index 0000000..3008d53
--- /dev/null
+++ b/arch/sparc64/lib/PeeCeeI.c
@@ -0,0 +1,237 @@
+/* $Id: PeeCeeI.c,v 1.4 1999/09/06 01:17:35 davem Exp $
+ * PeeCeeI.c: The emerging standard...
+ *
+ * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <asm/io.h>
+#include <asm/byteorder.h>
+
+void outsb(void __iomem *addr, const void *src, unsigned long count)
+{
+	const u8 *p = src;
+
+	while(count--)
+		outb(*p++, addr);
+}
+
+void outsw(void __iomem *addr, const void *src, unsigned long count)
+{
+	if(count) {
+		u16 *ps = (u16 *)src;
+		u32 *pi;
+
+		if(((u64)src) & 0x2) {
+			u16 val = le16_to_cpup(ps);
+			outw(val, addr);
+			ps++;
+			count--;
+		}
+		pi = (u32 *)ps;
+		while(count >= 2) {
+			u32 w = le32_to_cpup(pi);
+
+			pi++;
+			outw(w >> 0, addr);
+			outw(w >> 16, addr);
+			count -= 2;
+		}
+		ps = (u16 *)pi;
+		if(count) {
+			u16 val = le16_to_cpup(ps);
+			outw(val, addr);
+		}
+	}
+}
+
+void outsl(void __iomem *addr, const void *src, unsigned long count)
+{
+	if(count) {
+		if((((u64)src) & 0x3) == 0) {
+			u32 *p = (u32 *)src;
+			while(count--) {
+				u32 val = cpu_to_le32p(p);
+				outl(val, addr);
+				p++;
+			}
+		} else {
+			u8 *pb;
+			u16 *ps = (u16 *)src;
+			u32 l = 0, l2;
+			u32 *pi;
+
+			switch(((u64)src) & 0x3) {
+			case 0x2:
+				count -= 1;
+				l = cpu_to_le16p(ps) << 16;
+				ps++;
+				pi = (u32 *)ps;
+				while(count--) {
+					l2 = cpu_to_le32p(pi);
+					pi++;
+					outl(((l >> 16) | (l2 << 16)), addr);
+					l = l2;
+				}
+				ps = (u16 *)pi;
+				l2 = cpu_to_le16p(ps);
+				outl(((l >> 16) | (l2 << 16)), addr);
+				break;
+
+			case 0x1:
+				count -= 1;
+				pb = (u8 *)src;
+				l = (*pb++ << 8);
+				ps = (u16 *)pb;
+				l2 = cpu_to_le16p(ps);
+				ps++;
+				l |= (l2 << 16);
+				pi = (u32 *)ps;
+				while(count--) {
+					l2 = cpu_to_le32p(pi);
+					pi++;
+					outl(((l >> 8) | (l2 << 24)), addr);
+					l = l2;
+				}
+				pb = (u8 *)pi;
+				outl(((l >> 8) | (*pb << 24)), addr);
+				break;
+
+			case 0x3:
+				count -= 1;
+				pb = (u8 *)src;
+				l = (*pb++ << 24);
+				pi = (u32 *)pb;
+				while(count--) {
+					l2 = cpu_to_le32p(pi);
+					pi++;
+					outl(((l >> 24) | (l2 << 8)), addr);
+					l = l2;
+				}
+				ps = (u16 *)pi;
+				l2 = cpu_to_le16p(ps);
+				ps++;
+				pb = (u8 *)ps;
+				l2 |= (*pb << 16);
+				outl(((l >> 24) | (l2 << 8)), addr);
+				break;
+			}
+		}
+	}
+}
+
+void insb(void __iomem *addr, void *dst, unsigned long count)
+{
+	if(count) {
+		u32 *pi;
+		u8 *pb = dst;
+
+		while((((unsigned long)pb) & 0x3) && count--)
+			*pb++ = inb(addr);
+		pi = (u32 *)pb;
+		while(count >= 4) {
+			u32 w;
+
+			w  = (inb(addr) << 24);
+			w |= (inb(addr) << 16);
+			w |= (inb(addr) << 8);
+			w |= (inb(addr) << 0);
+			*pi++ = w;
+			count -= 4;
+		}
+		pb = (u8 *)pi;
+		while(count--)
+			*pb++ = inb(addr);
+	}
+}
+
+void insw(void __iomem *addr, void *dst, unsigned long count)
+{
+	if(count) {
+		u16 *ps = dst;
+		u32 *pi;
+
+		if(((unsigned long)ps) & 0x2) {
+			*ps++ = le16_to_cpu(inw(addr));
+			count--;
+		}
+		pi = (u32 *)ps;
+		while(count >= 2) {
+			u32 w;
+
+			w  = (le16_to_cpu(inw(addr)) << 16);
+			w |= (le16_to_cpu(inw(addr)) << 0);
+			*pi++ = w;
+			count -= 2;
+		}
+		ps = (u16 *)pi;
+		if(count)
+			*ps = le16_to_cpu(inw(addr));
+	}
+}
+
+void insl(void __iomem *addr, void *dst, unsigned long count)
+{
+	if(count) {
+		if((((unsigned long)dst) & 0x3) == 0) {
+			u32 *pi = dst;
+			while(count--)
+				*pi++ = le32_to_cpu(inl(addr));
+		} else {
+			u32 l = 0, l2, *pi;
+			u16 *ps;
+			u8 *pb;
+
+			switch(((unsigned long)dst) & 3) {
+			case 0x2:
+				ps = dst;
+				count -= 1;
+				l = le32_to_cpu(inl(addr));
+				*ps++ = l;
+				pi = (u32 *)ps;
+				while(count--) {
+					l2 = le32_to_cpu(inl(addr));
+					*pi++ = (l << 16) | (l2 >> 16);
+					l = l2;
+				}
+				ps = (u16 *)pi;
+				*ps = l;
+				break;
+
+			case 0x1:
+				pb = dst;
+				count -= 1;
+				l = le32_to_cpu(inl(addr));
+				*pb++ = l >> 24;
+				ps = (u16 *)pb;
+				*ps++ = ((l >> 8) & 0xffff);
+				pi = (u32 *)ps;
+				while(count--) {
+					l2 = le32_to_cpu(inl(addr));
+					*pi++ = (l << 24) | (l2 >> 8);
+					l = l2;
+				}
+				pb = (u8 *)pi;
+				*pb = l;
+				break;
+
+			case 0x3:
+				pb = (u8 *)dst;
+				count -= 1;
+				l = le32_to_cpu(inl(addr));
+				*pb++ = l >> 24;
+				pi = (u32 *)pb;
+				while(count--) {
+					l2 = le32_to_cpu(inl(addr));
+					*pi++ = (l << 8) | (l2 >> 24);
+					l = l2;
+				}
+				ps = (u16 *)pi;
+				*ps++ = ((l >> 8) & 0xffff);
+				pb = (u8 *)ps;
+				*pb = l;
+				break;
+			}
+		}
+	}
+}
+
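
The unaligned cases in outsl() above are the subtle part of this file: the
code keeps the previous aligned 32-bit read in a carry word and splices each
output longword together from two aligned reads.  A minimal user-space sketch
of the 2-byte-offset case (an illustration only, assuming a little-endian
host so the le32/le16 conversion helpers drop out):

  #include <stdint.h>
  #include <stdio.h>
  #include <string.h>

  /* Rebuild the 32-bit value stored at (buf + 2) from the two aligned
   * words covering bytes 0..3 and 4..7 - the same "(l >> 16) | (l2 << 16)"
   * expression used by the 0x2 case in outsl() above.
   */
  static uint32_t splice_off2(uint32_t prev, uint32_t next)
  {
          return (prev >> 16) | (next << 16);
  }

  int main(void)
  {
          uint8_t buf[8] = { 0, 0, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 };
          uint32_t prev, next;

          memcpy(&prev, buf, 4);          /* aligned read: bytes 0..3 */
          memcpy(&next, buf + 4, 4);      /* aligned read: bytes 4..7 */
          /* prints the word made of bytes 2..5: 0x44332211 on LE */
          printf("0x%08x\n", (unsigned)splice_off2(prev, next));
          return 0;
  }
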
diff --git a/arch/sparc64/lib/U1copy_from_user.S b/arch/sparc64/lib/U1copy_from_user.S
new file mode 100644
index 0000000..93146a8
--- /dev/null
+++ b/arch/sparc64/lib/U1copy_from_user.S
@@ -0,0 +1,33 @@
+/* U1copy_from_user.S: UltraSparc-I/II/IIi/IIe optimized copy from userspace.
+ *
+ * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
+ */
+
+#define EX_LD(x)		\
+98:	x;			\
+	.section .fixup;	\
+	.align 4;		\
+99:	retl;			\
+	 mov	1, %o0;		\
+	.section __ex_table;	\
+	.align 4;		\
+	.word 98b, 99b;		\
+	.text;			\
+	.align 4;
+
+#define FUNC_NAME		___copy_from_user
+#define LOAD(type,addr,dest)	type##a [addr] %asi, dest
+#define LOAD_BLK(addr,dest)	ldda [addr] ASI_BLK_AIUS, dest
+#define EX_RETVAL(x)		0
+
+	/* Writing to %asi is _expensive_ so we hardcode it.
+	 * Reading %asi to check for KERNEL_DS is comparatively
+	 * cheap.
+	 */
+#define PREAMBLE					\
+	rd		%asi, %g1;			\
+	cmp		%g1, ASI_AIUS;			\
+	bne,pn		%icc, memcpy_user_stub;		\
+	 nop;						\
+
+#include "U1memcpy.S"
diff --git a/arch/sparc64/lib/U1copy_to_user.S b/arch/sparc64/lib/U1copy_to_user.S
new file mode 100644
index 0000000..1fccc52
--- /dev/null
+++ b/arch/sparc64/lib/U1copy_to_user.S
@@ -0,0 +1,33 @@
+/* U1copy_to_user.S: UltraSparc-I/II/IIi/IIe optimized copy to userspace.
+ *
+ * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
+ */
+
+#define EX_ST(x)		\
+98:	x;			\
+	.section .fixup;	\
+	.align 4;		\
+99:	retl;			\
+	 mov	1, %o0;		\
+	.section __ex_table;	\
+	.align 4;		\
+	.word 98b, 99b;		\
+	.text;			\
+	.align 4;
+
+#define FUNC_NAME		___copy_to_user
+#define STORE(type,src,addr)	type##a src, [addr] ASI_AIUS
+#define STORE_BLK(src,addr)	stda src, [addr] ASI_BLK_AIUS
+#define EX_RETVAL(x)		0
+
+	/* Writing to %asi is _expensive_ so we hardcode it.
+	 * Reading %asi to check for KERNEL_DS is comparatively
+	 * cheap.
+	 */
+#define PREAMBLE					\
+	rd		%asi, %g1;			\
+	cmp		%g1, ASI_AIUS;			\
+	bne,pn		%icc, memcpy_user_stub;		\
+	 nop;						\
+
+#include "U1memcpy.S"
diff --git a/arch/sparc64/lib/U1memcpy.S b/arch/sparc64/lib/U1memcpy.S
new file mode 100644
index 0000000..da9b520
--- /dev/null
+++ b/arch/sparc64/lib/U1memcpy.S
@@ -0,0 +1,560 @@
+/* U1memcpy.S: UltraSPARC-I/II/IIi/IIe optimized memcpy.
+ *
+ * Copyright (C) 1997, 2004 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1996, 1997, 1998, 1999 Jakub Jelinek (jj@ultra.linux.cz)
+ */
+
+#ifdef __KERNEL__
+#include <asm/visasm.h>
+#include <asm/asi.h>
+#define GLOBAL_SPARE	g7
+#else
+#define GLOBAL_SPARE	g5
+#define ASI_BLK_P 0xf0
+#define FPRS_FEF  0x04
+#ifdef MEMCPY_DEBUG
+#define VISEntry rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs; \
+		 clr %g1; clr %g2; clr %g3; subcc %g0, %g0, %g0;
+#define VISExit and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
+#else
+#define VISEntry rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs
+#define VISExit and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
+#endif
+#endif
+
+#ifndef EX_LD
+#define EX_LD(x)	x
+#endif
+
+#ifndef EX_ST
+#define EX_ST(x)	x
+#endif
+
+#ifndef EX_RETVAL
+#define EX_RETVAL(x)	x
+#endif
+
+#ifndef LOAD
+#define LOAD(type,addr,dest)	type [addr], dest
+#endif
+
+#ifndef LOAD_BLK
+#define LOAD_BLK(addr,dest)	ldda [addr] ASI_BLK_P, dest
+#endif
+
+#ifndef STORE
+#define STORE(type,src,addr)	type src, [addr]
+#endif
+
+#ifndef STORE_BLK
+#define STORE_BLK(src,addr)	stda src, [addr] ASI_BLK_P
+#endif
+
+#ifndef FUNC_NAME
+#define FUNC_NAME	memcpy
+#endif
+
+#ifndef PREAMBLE
+#define PREAMBLE
+#endif
+
+#ifndef XCC
+#define XCC xcc
+#endif
+
+#define FREG_FROB(f1, f2, f3, f4, f5, f6, f7, f8, f9)		\
+	faligndata		%f1, %f2, %f48;			\
+	faligndata		%f2, %f3, %f50;			\
+	faligndata		%f3, %f4, %f52;			\
+	faligndata		%f4, %f5, %f54;			\
+	faligndata		%f5, %f6, %f56;			\
+	faligndata		%f6, %f7, %f58;			\
+	faligndata		%f7, %f8, %f60;			\
+	faligndata		%f8, %f9, %f62;
+
+#define MAIN_LOOP_CHUNK(src, dest, fdest, fsrc, len, jmptgt)	\
+	EX_LD(LOAD_BLK(%src, %fdest));				\
+	EX_ST(STORE_BLK(%fsrc, %dest));				\
+	add			%src, 0x40, %src;		\
+	subcc			%len, 0x40, %len;		\
+	be,pn			%xcc, jmptgt;			\
+	 add			%dest, 0x40, %dest;		\
+
+#define LOOP_CHUNK1(src, dest, len, branch_dest)		\
+	MAIN_LOOP_CHUNK(src, dest, f0,  f48, len, branch_dest)
+#define LOOP_CHUNK2(src, dest, len, branch_dest)		\
+	MAIN_LOOP_CHUNK(src, dest, f16, f48, len, branch_dest)
+#define LOOP_CHUNK3(src, dest, len, branch_dest)		\
+	MAIN_LOOP_CHUNK(src, dest, f32, f48, len, branch_dest)
+
+#define STORE_SYNC(dest, fsrc)				\
+	EX_ST(STORE_BLK(%fsrc, %dest));			\
+	add			%dest, 0x40, %dest;
+
+#define STORE_JUMP(dest, fsrc, target)			\
+	EX_ST(STORE_BLK(%fsrc, %dest));			\
+	add			%dest, 0x40, %dest;	\
+	ba,pt			%xcc, target;
+
+#define FINISH_VISCHUNK(dest, f0, f1, left)	\
+	subcc			%left, 8, %left;\
+	bl,pn			%xcc, 95f;	\
+	 faligndata		%f0, %f1, %f48;	\
+	EX_ST(STORE(std, %f48, %dest));		\
+	add			%dest, 8, %dest;
+
+#define UNEVEN_VISCHUNK_LAST(dest, f0, f1, left)	\
+	subcc			%left, 8, %left;	\
+	bl,pn			%xcc, 95f;		\
+	 fsrc1			%f0, %f1;
+
+#define UNEVEN_VISCHUNK(dest, f0, f1, left)		\
+	UNEVEN_VISCHUNK_LAST(dest, f0, f1, left)	\
+	ba,a,pt			%xcc, 93f;
+
+	.register	%g2,#scratch
+	.register	%g3,#scratch
+
+	.text
+	.align		64
+
+	.globl		FUNC_NAME
+	.type		FUNC_NAME,#function
+FUNC_NAME:		/* %o0=dst, %o1=src, %o2=len */
+	srlx		%o2, 31, %g2
+	cmp		%g2, 0
+	tne		%xcc, 5
+	PREAMBLE
+	mov		%o0, %o4
+	cmp		%o2, 0
+	be,pn		%XCC, 85f
+	 or		%o0, %o1, %o3
+	cmp		%o2, 16
+	blu,a,pn	%XCC, 80f
+	 or		%o3, %o2, %o3
+
+	cmp		%o2, (5 * 64)
+	blu,pt		%XCC, 70f
+	 andcc		%o3, 0x7, %g0
+
+	/* Clobbers o5/g1/g2/g3/g7/icc/xcc.  */
+	VISEntry
+
+	/* Is 'dst' already aligned on a 64-byte boundary? */
+	andcc		%o0, 0x3f, %g2
+	be,pt		%XCC, 2f
+
+	/* Compute abs((dst & 0x3f) - 0x40) into %g2.  This is the number
+	 * of bytes to copy to make 'dst' 64-byte aligned.  We pre-
+	 * subtract this from 'len'.
+	 */
+	 sub		%o0, %o1, %GLOBAL_SPARE
+	sub		%g2, 0x40, %g2
+	sub		%g0, %g2, %g2
+	sub		%o2, %g2, %o2
+	andcc		%g2, 0x7, %g1
+	be,pt		%icc, 2f
+	 and		%g2, 0x38, %g2
+
+1:	subcc		%g1, 0x1, %g1
+	EX_LD(LOAD(ldub, %o1 + 0x00, %o3))
+	EX_ST(STORE(stb, %o3, %o1 + %GLOBAL_SPARE))
+	bgu,pt		%XCC, 1b
+	 add		%o1, 0x1, %o1
+
+	add		%o1, %GLOBAL_SPARE, %o0
+
+2:	cmp		%g2, 0x0
+	and		%o1, 0x7, %g1
+	be,pt		%icc, 3f
+	 alignaddr	%o1, %g0, %o1
+
+	EX_LD(LOAD(ldd, %o1, %f4))
+1:	EX_LD(LOAD(ldd, %o1 + 0x8, %f6))
+	add		%o1, 0x8, %o1
+	subcc		%g2, 0x8, %g2
+	faligndata	%f4, %f6, %f0
+	EX_ST(STORE(std, %f0, %o0))
+	be,pn		%icc, 3f
+	 add		%o0, 0x8, %o0
+
+	EX_LD(LOAD(ldd, %o1 + 0x8, %f4))
+	add		%o1, 0x8, %o1
+	subcc		%g2, 0x8, %g2
+	faligndata	%f6, %f4, %f0
+	EX_ST(STORE(std, %f0, %o0))
+	bne,pt		%icc, 1b
+	 add		%o0, 0x8, %o0
+
+	/* Destination is 64-byte aligned.  */
+3:	
+	membar		  #LoadStore | #StoreStore | #StoreLoad
+
+	subcc		%o2, 0x40, %GLOBAL_SPARE
+	add		%o1, %g1, %g1
+	andncc		%GLOBAL_SPARE, (0x40 - 1), %GLOBAL_SPARE
+	srl		%g1, 3, %g2
+	sub		%o2, %GLOBAL_SPARE, %g3
+	andn		%o1, (0x40 - 1), %o1
+	and		%g2, 7, %g2
+	andncc		%g3, 0x7, %g3
+	fmovd		%f0, %f2
+	sub		%g3, 0x8, %g3
+	sub		%o2, %GLOBAL_SPARE, %o2
+
+	add		%g1, %GLOBAL_SPARE, %g1
+	subcc		%o2, %g3, %o2
+
+	EX_LD(LOAD_BLK(%o1, %f0))
+	add		%o1, 0x40, %o1
+	add		%g1, %g3, %g1
+	EX_LD(LOAD_BLK(%o1, %f16))
+	add		%o1, 0x40, %o1
+	sub		%GLOBAL_SPARE, 0x80, %GLOBAL_SPARE
+	EX_LD(LOAD_BLK(%o1, %f32))
+	add		%o1, 0x40, %o1
+
+	/* There are 8 instances of the unrolled loop,
+	 * one for each possible alignment of the
+	 * source buffer.  Each loop instance is 452
+	 * bytes.
+	 */
+	sll		%g2, 3, %o3
+	sub		%o3, %g2, %o3
+	sllx		%o3, 4, %o3
+	add		%o3, %g2, %o3
+	sllx		%o3, 2, %g2
+1:	rd		%pc, %o3
+	add		%o3, %lo(1f - 1b), %o3
+	jmpl		%o3 + %g2, %g0
+	 nop
+
+	.align		64
+1:	FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16)
+	LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
+	FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32)
+	LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
+	FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0)
+	LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
+	ba,pt		%xcc, 1b+4
+	 faligndata	%f0, %f2, %f48
+1:	FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32)
+	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0)
+	STORE_JUMP(o0, f48, 40f) membar #Sync
+2:	FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0)
+	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16)
+	STORE_JUMP(o0, f48, 48f) membar #Sync
+3:	FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16)
+	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32)
+	STORE_JUMP(o0, f48, 56f) membar #Sync
+
+1:	FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18)
+	LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
+	FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34)
+	LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
+	FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2)
+	LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
+	ba,pt		%xcc, 1b+4
+	 faligndata	%f2, %f4, %f48
+1:	FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34)
+	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2)
+	STORE_JUMP(o0, f48, 41f) membar #Sync
+2:	FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2)
+	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18)
+	STORE_JUMP(o0, f48, 49f) membar #Sync
+3:	FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18)
+	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34)
+	STORE_JUMP(o0, f48, 57f) membar #Sync
+
+1:	FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20)
+	LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
+	FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36)
+	LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
+	FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4)
+	LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
+	ba,pt		%xcc, 1b+4
+	 faligndata	%f4, %f6, %f48
+1:	FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36)
+	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4)
+	STORE_JUMP(o0, f48, 42f) membar #Sync
+2:	FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4)
+	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20)
+	STORE_JUMP(o0, f48, 50f) membar #Sync
+3:	FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20)
+	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36)
+	STORE_JUMP(o0, f48, 58f) membar #Sync
+
+1:	FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22)
+	LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
+	FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38)
+	LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
+	FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6) 
+	LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
+	ba,pt		%xcc, 1b+4
+	 faligndata	%f6, %f8, %f48
+1:	FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38)
+	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6)
+	STORE_JUMP(o0, f48, 43f) membar #Sync
+2:	FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6)
+	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22)
+	STORE_JUMP(o0, f48, 51f) membar #Sync
+3:	FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22)
+	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38)
+	STORE_JUMP(o0, f48, 59f) membar #Sync
+
+1:	FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24)
+	LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
+	FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40)
+	LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
+	FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8)
+	LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
+	ba,pt		%xcc, 1b+4
+	 faligndata	%f8, %f10, %f48
+1:	FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40)
+	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8)
+	STORE_JUMP(o0, f48, 44f) membar #Sync
+2:	FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8)
+	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24)
+	STORE_JUMP(o0, f48, 52f) membar #Sync
+3:	FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24)
+	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40)
+	STORE_JUMP(o0, f48, 60f) membar #Sync
+
+1:	FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26)
+	LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
+	FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42)
+	LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
+	FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10)
+	LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
+	ba,pt		%xcc, 1b+4
+	 faligndata	%f10, %f12, %f48
+1:	FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42)
+	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10)
+	STORE_JUMP(o0, f48, 45f) membar #Sync
+2:	FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10)
+	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26)
+	STORE_JUMP(o0, f48, 53f) membar #Sync
+3:	FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26)
+	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42)
+	STORE_JUMP(o0, f48, 61f) membar #Sync
+
+1:	FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28)
+	LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
+	FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44)
+	LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
+	FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12)
+	LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
+	ba,pt		%xcc, 1b+4
+	 faligndata	%f12, %f14, %f48
+1:	FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44)
+	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12)
+	STORE_JUMP(o0, f48, 46f) membar #Sync
+2:	FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12)
+	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28)
+	STORE_JUMP(o0, f48, 54f) membar #Sync
+3:	FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28)
+	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44)
+	STORE_JUMP(o0, f48, 62f) membar #Sync
+
+1:	FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30)
+	LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f)
+	FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46)
+	LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f)
+	FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14)
+	LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f)
+	ba,pt		%xcc, 1b+4
+	 faligndata	%f14, %f16, %f48
+1:	FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46)
+	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14)
+	STORE_JUMP(o0, f48, 47f) membar #Sync
+2:	FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14)
+	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30)
+	STORE_JUMP(o0, f48, 55f) membar #Sync
+3:	FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30)
+	STORE_SYNC(o0, f48) membar #Sync
+	FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46)
+	STORE_JUMP(o0, f48, 63f) membar #Sync
+
+40:	FINISH_VISCHUNK(o0, f0,  f2,  g3)
+41:	FINISH_VISCHUNK(o0, f2,  f4,  g3)
+42:	FINISH_VISCHUNK(o0, f4,  f6,  g3)
+43:	FINISH_VISCHUNK(o0, f6,  f8,  g3)
+44:	FINISH_VISCHUNK(o0, f8,  f10, g3)
+45:	FINISH_VISCHUNK(o0, f10, f12, g3)
+46:	FINISH_VISCHUNK(o0, f12, f14, g3)
+47:	UNEVEN_VISCHUNK(o0, f14, f0,  g3)
+48:	FINISH_VISCHUNK(o0, f16, f18, g3)
+49:	FINISH_VISCHUNK(o0, f18, f20, g3)
+50:	FINISH_VISCHUNK(o0, f20, f22, g3)
+51:	FINISH_VISCHUNK(o0, f22, f24, g3)
+52:	FINISH_VISCHUNK(o0, f24, f26, g3)
+53:	FINISH_VISCHUNK(o0, f26, f28, g3)
+54:	FINISH_VISCHUNK(o0, f28, f30, g3)
+55:	UNEVEN_VISCHUNK(o0, f30, f0,  g3)
+56:	FINISH_VISCHUNK(o0, f32, f34, g3)
+57:	FINISH_VISCHUNK(o0, f34, f36, g3)
+58:	FINISH_VISCHUNK(o0, f36, f38, g3)
+59:	FINISH_VISCHUNK(o0, f38, f40, g3)
+60:	FINISH_VISCHUNK(o0, f40, f42, g3)
+61:	FINISH_VISCHUNK(o0, f42, f44, g3)
+62:	FINISH_VISCHUNK(o0, f44, f46, g3)
+63:	UNEVEN_VISCHUNK_LAST(o0, f46, f0,  g3)
+
+93:	EX_LD(LOAD(ldd, %o1, %f2))
+	add		%o1, 8, %o1
+	subcc		%g3, 8, %g3
+	faligndata	%f0, %f2, %f8
+	EX_ST(STORE(std, %f8, %o0))
+	bl,pn		%xcc, 95f
+	 add		%o0, 8, %o0
+	EX_LD(LOAD(ldd, %o1, %f0))
+	add		%o1, 8, %o1
+	subcc		%g3, 8, %g3
+	faligndata	%f2, %f0, %f8
+	EX_ST(STORE(std, %f8, %o0))
+	bge,pt		%xcc, 93b
+	 add		%o0, 8, %o0
+
+95:	brz,pt		%o2, 2f
+	 mov		%g1, %o1
+
+1:	EX_LD(LOAD(ldub, %o1, %o3))
+	add		%o1, 1, %o1
+	subcc		%o2, 1, %o2
+	EX_ST(STORE(stb, %o3, %o0))
+	bne,pt		%xcc, 1b
+	 add		%o0, 1, %o0
+
+2:	membar		#StoreLoad | #StoreStore
+	VISExit
+	retl
+	 mov		EX_RETVAL(%o4), %o0
+
+	.align		64
+70:	/* 16 < len <= (5 * 64) */
+	bne,pn		%XCC, 75f
+	 sub		%o0, %o1, %o3
+
+72:	andn		%o2, 0xf, %GLOBAL_SPARE
+	and		%o2, 0xf, %o2
+1:	EX_LD(LOAD(ldx, %o1 + 0x00, %o5))
+	EX_LD(LOAD(ldx, %o1 + 0x08, %g1))
+	subcc		%GLOBAL_SPARE, 0x10, %GLOBAL_SPARE
+	EX_ST(STORE(stx, %o5, %o1 + %o3))
+	add		%o1, 0x8, %o1
+	EX_ST(STORE(stx, %g1, %o1 + %o3))
+	bgu,pt		%XCC, 1b
+	 add		%o1, 0x8, %o1
+73:	andcc		%o2, 0x8, %g0
+	be,pt		%XCC, 1f
+	 nop
+	EX_LD(LOAD(ldx, %o1, %o5))
+	sub		%o2, 0x8, %o2
+	EX_ST(STORE(stx, %o5, %o1 + %o3))
+	add		%o1, 0x8, %o1
+1:	andcc		%o2, 0x4, %g0
+	be,pt		%XCC, 1f
+	 nop
+	EX_LD(LOAD(lduw, %o1, %o5))
+	sub		%o2, 0x4, %o2
+	EX_ST(STORE(stw, %o5, %o1 + %o3))
+	add		%o1, 0x4, %o1
+1:	cmp		%o2, 0
+	be,pt		%XCC, 85f
+	 nop
+	ba,pt		%xcc, 90f
+	 nop
+
+75:	andcc		%o0, 0x7, %g1
+	sub		%g1, 0x8, %g1
+	be,pn		%icc, 2f
+	 sub		%g0, %g1, %g1
+	sub		%o2, %g1, %o2
+
+1:	EX_LD(LOAD(ldub, %o1, %o5))
+	subcc		%g1, 1, %g1
+	EX_ST(STORE(stb, %o5, %o1 + %o3))
+	bgu,pt		%icc, 1b
+	 add		%o1, 1, %o1
+
+2:	add		%o1, %o3, %o0
+	andcc		%o1, 0x7, %g1
+	bne,pt		%icc, 8f
+	 sll		%g1, 3, %g1
+
+	cmp		%o2, 16
+	bgeu,pt		%icc, 72b
+	 nop
+	ba,a,pt		%xcc, 73b
+
+8:	mov		64, %o3
+	andn		%o1, 0x7, %o1
+	EX_LD(LOAD(ldx, %o1, %g2))
+	sub		%o3, %g1, %o3
+	andn		%o2, 0x7, %GLOBAL_SPARE
+	sllx		%g2, %g1, %g2
+1:	EX_LD(LOAD(ldx, %o1 + 0x8, %g3))
+	subcc		%GLOBAL_SPARE, 0x8, %GLOBAL_SPARE
+	add		%o1, 0x8, %o1
+	srlx		%g3, %o3, %o5
+	or		%o5, %g2, %o5
+	EX_ST(STORE(stx, %o5, %o0))
+	add		%o0, 0x8, %o0
+	bgu,pt		%icc, 1b
+	 sllx		%g3, %g1, %g2
+
+	srl		%g1, 3, %g1
+	andcc		%o2, 0x7, %o2
+	be,pn		%icc, 85f
+	 add		%o1, %g1, %o1
+	ba,pt		%xcc, 90f
+	 sub		%o0, %o1, %o3
+
+	.align		64
+80:	/* 0 < len <= 16 */
+	andcc		%o3, 0x3, %g0
+	bne,pn		%XCC, 90f
+	 sub		%o0, %o1, %o3
+
+1:	EX_LD(LOAD(lduw, %o1, %g1))
+	subcc		%o2, 4, %o2
+	EX_ST(STORE(stw, %g1, %o1 + %o3))
+	bgu,pt		%XCC, 1b
+	 add		%o1, 4, %o1
+
+85:	retl
+	 mov		EX_RETVAL(%o4), %o0
+
+	.align		32
+90:	EX_LD(LOAD(ldub, %o1, %g1))
+	subcc		%o2, 1, %o2
+	EX_ST(STORE(stb, %g1, %o1 + %o3))
+	bgu,pt		%XCC, 90b
+	 add		%o1, 1, %o1
+	retl
+	 mov		EX_RETVAL(%o4), %o0
+
+	.size		FUNC_NAME, .-FUNC_NAME
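
The computed jump above selects one of the eight unrolled loop bodies, each
452 bytes long, by multiplying the source-alignment index by 452 using only
shifts and adds.  A quick sketch of that arithmetic (plain C, just to make
the sll/sub/sllx/add/sllx sequence explicit; not kernel code):

  #include <assert.h>

  /* Mirrors the sequence before the jmpl in U1memcpy:
   * o3 = g2 * 8; o3 -= g2 (7*g2); o3 <<= 4 (112*g2);
   * o3 += g2 (113*g2); offset = o3 << 2 (452*g2).
   */
  static unsigned long loop_offset(unsigned long g2)
  {
          unsigned long o3 = g2 << 3;

          o3 -= g2;
          o3 <<= 4;
          o3 += g2;
          return o3 << 2;
  }

  int main(void)
  {
          for (unsigned long g2 = 0; g2 < 8; g2++)
                  assert(loop_offset(g2) == g2 * 452);
          return 0;
  }
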
diff --git a/arch/sparc64/lib/U3copy_from_user.S b/arch/sparc64/lib/U3copy_from_user.S
new file mode 100644
index 0000000..df600b6
--- /dev/null
+++ b/arch/sparc64/lib/U3copy_from_user.S
@@ -0,0 +1,22 @@
+/* U3copy_from_user.S: UltraSparc-III optimized copy from userspace.
+ *
+ * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
+ */
+
+#define EX_LD(x)		\
+98:	x;			\
+	.section .fixup;	\
+	.align 4;		\
+99:	retl;			\
+	 mov	1, %o0;		\
+	.section __ex_table;	\
+	.align 4;		\
+	.word 98b, 99b;		\
+	.text;			\
+	.align 4;
+
+#define FUNC_NAME		U3copy_from_user
+#define LOAD(type,addr,dest)	type##a [addr] %asi, dest
+#define EX_RETVAL(x)		0
+
+#include "U3memcpy.S"
diff --git a/arch/sparc64/lib/U3copy_to_user.S b/arch/sparc64/lib/U3copy_to_user.S
new file mode 100644
index 0000000..f337f22
--- /dev/null
+++ b/arch/sparc64/lib/U3copy_to_user.S
@@ -0,0 +1,33 @@
+/* U3copy_to_user.S: UltraSparc-III optimized copy to userspace.
+ *
+ * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
+ */
+
+#define EX_ST(x)		\
+98:	x;			\
+	.section .fixup;	\
+	.align 4;		\
+99:	retl;			\
+	 mov	1, %o0;		\
+	.section __ex_table;	\
+	.align 4;		\
+	.word 98b, 99b;		\
+	.text;			\
+	.align 4;
+
+#define FUNC_NAME		U3copy_to_user
+#define STORE(type,src,addr)	type##a src, [addr] ASI_AIUS
+#define STORE_BLK(src,addr)	stda src, [addr] ASI_BLK_AIUS
+#define EX_RETVAL(x)		0
+
+	/* Writing to %asi is _expensive_ so we hardcode it.
+	 * Reading %asi to check for KERNEL_DS is comparatively
+	 * cheap.
+	 */
+#define PREAMBLE					\
+	rd		%asi, %g1;			\
+	cmp		%g1, ASI_AIUS;			\
+	bne,pn		%icc, memcpy_user_stub;		\
+	 nop;						\
+
+#include "U3memcpy.S"
diff --git a/arch/sparc64/lib/U3memcpy.S b/arch/sparc64/lib/U3memcpy.S
new file mode 100644
index 0000000..7cae9cc
--- /dev/null
+++ b/arch/sparc64/lib/U3memcpy.S
@@ -0,0 +1,422 @@
+/* U3memcpy.S: UltraSparc-III optimized memcpy.
+ *
+ * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
+ */
+
+#ifdef __KERNEL__
+#include <asm/visasm.h>
+#include <asm/asi.h>
+#define GLOBAL_SPARE	%g7
+#else
+#define ASI_BLK_P 0xf0
+#define FPRS_FEF  0x04
+#ifdef MEMCPY_DEBUG
+#define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs; \
+		     clr %g1; clr %g2; clr %g3; subcc %g0, %g0, %g0;
+#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
+#else
+#define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs
+#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
+#endif
+#define GLOBAL_SPARE	%g5
+#endif
+
+#ifndef EX_LD
+#define EX_LD(x)	x
+#endif
+
+#ifndef EX_ST
+#define EX_ST(x)	x
+#endif
+
+#ifndef EX_RETVAL
+#define EX_RETVAL(x)	x
+#endif
+
+#ifndef LOAD
+#define LOAD(type,addr,dest)	type [addr], dest
+#endif
+
+#ifndef STORE
+#define STORE(type,src,addr)	type src, [addr]
+#endif
+
+#ifndef STORE_BLK
+#define STORE_BLK(src,addr)	stda src, [addr] ASI_BLK_P
+#endif
+
+#ifndef FUNC_NAME
+#define FUNC_NAME	U3memcpy
+#endif
+
+#ifndef PREAMBLE
+#define PREAMBLE
+#endif
+
+#ifndef XCC
+#define XCC xcc
+#endif
+
+	.register	%g2,#scratch
+	.register	%g3,#scratch
+
+	/* Special/non-trivial issues of this code:
+	 *
+	 * 1) %o5 is preserved from VISEntryHalf to VISExitHalf
+	 * 2) Only low 32 FPU registers are used so that only the
+	 *    lower half of the FPU register set is dirtied by this
+	 *    code.  This is especially important in the kernel.
+	 * 3) This code never prefetches cachelines past the end
+	 *    of the source buffer.
+	 */
+
+	.text
+	.align		64
+
+	/* The cheetah's flexible spine, oversized liver, enlarged heart,
+	 * slender muscular body, and claws make it the swiftest hunter
+	 * in Africa and the fastest animal on land.  Can reach speeds
+	 * of up to 2.4GB per second.
+	 */
+
+	.globl	FUNC_NAME
+	.type	FUNC_NAME,#function
+FUNC_NAME:	/* %o0=dst, %o1=src, %o2=len */
+	srlx		%o2, 31, %g2
+	cmp		%g2, 0
+	tne		%xcc, 5
+	PREAMBLE
+	mov		%o0, %o4
+	cmp		%o2, 0
+	be,pn		%XCC, 85f
+	 or		%o0, %o1, %o3
+	cmp		%o2, 16
+	blu,a,pn	%XCC, 80f
+	 or		%o3, %o2, %o3
+
+	cmp		%o2, (3 * 64)
+	blu,pt		%XCC, 70f
+	 andcc		%o3, 0x7, %g0
+
+	/* Clobbers o5/g1/g2/g3/g7/icc/xcc.  We must preserve
+	 * o5 from here until we hit VISExitHalf.
+	 */
+	VISEntryHalf
+
+	/* Is 'dst' already aligned on a 64-byte boundary? */
+	andcc		%o0, 0x3f, %g2
+	be,pt		%XCC, 2f
+
+	/* Compute abs((dst & 0x3f) - 0x40) into %g2.  This is the number
+	 * of bytes to copy to make 'dst' 64-byte aligned.  We pre-
+	 * subtract this from 'len'.
+	 */
+	 sub		%o0, %o1, GLOBAL_SPARE
+	sub		%g2, 0x40, %g2
+	sub		%g0, %g2, %g2
+	sub		%o2, %g2, %o2
+	andcc		%g2, 0x7, %g1
+	be,pt		%icc, 2f
+	 and		%g2, 0x38, %g2
+
+1:	subcc		%g1, 0x1, %g1
+	EX_LD(LOAD(ldub, %o1 + 0x00, %o3))
+	EX_ST(STORE(stb, %o3, %o1 + GLOBAL_SPARE))
+	bgu,pt		%XCC, 1b
+	 add		%o1, 0x1, %o1
+
+	add		%o1, GLOBAL_SPARE, %o0
+
+2:	cmp		%g2, 0x0
+	and		%o1, 0x7, %g1
+	be,pt		%icc, 3f
+	 alignaddr	%o1, %g0, %o1
+
+	EX_LD(LOAD(ldd, %o1, %f4))
+1:	EX_LD(LOAD(ldd, %o1 + 0x8, %f6))
+	add		%o1, 0x8, %o1
+	subcc		%g2, 0x8, %g2
+	faligndata	%f4, %f6, %f0
+	EX_ST(STORE(std, %f0, %o0))
+	be,pn		%icc, 3f
+	 add		%o0, 0x8, %o0
+
+	EX_LD(LOAD(ldd, %o1 + 0x8, %f4))
+	add		%o1, 0x8, %o1
+	subcc		%g2, 0x8, %g2
+	faligndata	%f6, %f4, %f2
+	EX_ST(STORE(std, %f2, %o0))
+	bne,pt		%icc, 1b
+	 add		%o0, 0x8, %o0
+
+3:	LOAD(prefetch, %o1 + 0x000, #one_read)
+	LOAD(prefetch, %o1 + 0x040, #one_read)
+	andn		%o2, (0x40 - 1), GLOBAL_SPARE
+	LOAD(prefetch, %o1 + 0x080, #one_read)
+	LOAD(prefetch, %o1 + 0x0c0, #one_read)
+	LOAD(prefetch, %o1 + 0x100, #one_read)
+	EX_LD(LOAD(ldd, %o1 + 0x000, %f0))
+	LOAD(prefetch, %o1 + 0x140, #one_read)
+	EX_LD(LOAD(ldd, %o1 + 0x008, %f2))
+	LOAD(prefetch, %o1 + 0x180, #one_read)
+	EX_LD(LOAD(ldd, %o1 + 0x010, %f4))
+	LOAD(prefetch, %o1 + 0x1c0, #one_read)
+	faligndata	%f0, %f2, %f16
+	EX_LD(LOAD(ldd, %o1 + 0x018, %f6))
+	faligndata	%f2, %f4, %f18
+	EX_LD(LOAD(ldd, %o1 + 0x020, %f8))
+	faligndata	%f4, %f6, %f20
+	EX_LD(LOAD(ldd, %o1 + 0x028, %f10))
+	faligndata	%f6, %f8, %f22
+
+	EX_LD(LOAD(ldd, %o1 + 0x030, %f12))
+	faligndata	%f8, %f10, %f24
+	EX_LD(LOAD(ldd, %o1 + 0x038, %f14))
+	faligndata	%f10, %f12, %f26
+	EX_LD(LOAD(ldd, %o1 + 0x040, %f0))
+
+	subcc		GLOBAL_SPARE, 0x80, GLOBAL_SPARE
+	add		%o1, 0x40, %o1
+	bgu,pt		%XCC, 1f
+	 srl		GLOBAL_SPARE, 6, %o3
+	ba,pt		%xcc, 2f
+	 nop
+
+	.align		64
+1:
+	EX_LD(LOAD(ldd, %o1 + 0x008, %f2))
+	faligndata	%f12, %f14, %f28
+	EX_LD(LOAD(ldd, %o1 + 0x010, %f4))
+	faligndata	%f14, %f0, %f30
+	EX_ST(STORE_BLK(%f16, %o0))
+	EX_LD(LOAD(ldd, %o1 + 0x018, %f6))
+	faligndata	%f0, %f2, %f16
+	add		%o0, 0x40, %o0
+
+	EX_LD(LOAD(ldd, %o1 + 0x020, %f8))
+	faligndata	%f2, %f4, %f18
+	EX_LD(LOAD(ldd, %o1 + 0x028, %f10))
+	faligndata	%f4, %f6, %f20
+	EX_LD(LOAD(ldd, %o1 + 0x030, %f12))
+	subcc		%o3, 0x01, %o3
+	faligndata	%f6, %f8, %f22
+	EX_LD(LOAD(ldd, %o1 + 0x038, %f14))
+
+	faligndata	%f8, %f10, %f24
+	EX_LD(LOAD(ldd, %o1 + 0x040, %f0))
+	LOAD(prefetch, %o1 + 0x1c0, #one_read)
+	faligndata	%f10, %f12, %f26
+	bg,pt		%XCC, 1b
+	 add		%o1, 0x40, %o1
+
+	/* Finally we copy the last full 64-byte block. */
+2:
+	EX_LD(LOAD(ldd, %o1 + 0x008, %f2))
+	faligndata	%f12, %f14, %f28
+	EX_LD(LOAD(ldd, %o1 + 0x010, %f4))
+	faligndata	%f14, %f0, %f30
+	EX_ST(STORE_BLK(%f16, %o0))
+	EX_LD(LOAD(ldd, %o1 + 0x018, %f6))
+	faligndata	%f0, %f2, %f16
+	EX_LD(LOAD(ldd, %o1 + 0x020, %f8))
+	faligndata	%f2, %f4, %f18
+	EX_LD(LOAD(ldd, %o1 + 0x028, %f10))
+	faligndata	%f4, %f6, %f20
+	EX_LD(LOAD(ldd, %o1 + 0x030, %f12))
+	faligndata	%f6, %f8, %f22
+	EX_LD(LOAD(ldd, %o1 + 0x038, %f14))
+	faligndata	%f8, %f10, %f24
+	cmp		%g1, 0
+	be,pt		%XCC, 1f
+	 add		%o0, 0x40, %o0
+	EX_LD(LOAD(ldd, %o1 + 0x040, %f0))
+1:	faligndata	%f10, %f12, %f26
+	faligndata	%f12, %f14, %f28
+	faligndata	%f14, %f0, %f30
+	EX_ST(STORE_BLK(%f16, %o0))
+	add		%o0, 0x40, %o0
+	add		%o1, 0x40, %o1
+	membar		#Sync
+
+	/* Now we copy the (len modulo 64) bytes at the end.
+	 * Note how we borrow the %f0 loaded above.
+	 *
+	 * Also notice how this code is careful not to perform a
+	 * load past the end of the src buffer.
+	 */
+	and		%o2, 0x3f, %o2
+	andcc		%o2, 0x38, %g2
+	be,pn		%XCC, 2f
+	 subcc		%g2, 0x8, %g2
+	be,pn		%XCC, 2f
+	 cmp		%g1, 0
+
+	sub		%o2, %g2, %o2
+	be,a,pt		%XCC, 1f
+	 EX_LD(LOAD(ldd, %o1 + 0x00, %f0))
+
+1:	EX_LD(LOAD(ldd, %o1 + 0x08, %f2))
+	add		%o1, 0x8, %o1
+	subcc		%g2, 0x8, %g2
+	faligndata	%f0, %f2, %f8
+	EX_ST(STORE(std, %f8, %o0))
+	be,pn		%XCC, 2f
+	 add		%o0, 0x8, %o0
+	EX_LD(LOAD(ldd, %o1 + 0x08, %f0))
+	add		%o1, 0x8, %o1
+	subcc		%g2, 0x8, %g2
+	faligndata	%f2, %f0, %f8
+	EX_ST(STORE(std, %f8, %o0))
+	bne,pn		%XCC, 1b
+	 add		%o0, 0x8, %o0
+
+	/* If anything is left, we copy it one byte at a time.
+	 * Note that %g1 is (src & 0x3) saved above before the
+	 * alignaddr was performed.
+	 */
+2:
+	cmp		%o2, 0
+	add		%o1, %g1, %o1
+	VISExitHalf
+	be,pn		%XCC, 85f
+	 sub		%o0, %o1, %o3
+
+	andcc		%g1, 0x7, %g0
+	bne,pn		%icc, 90f
+	 andcc		%o2, 0x8, %g0
+	be,pt		%icc, 1f
+	 nop
+	EX_LD(LOAD(ldx, %o1, %o5))
+	EX_ST(STORE(stx, %o5, %o1 + %o3))
+	add		%o1, 0x8, %o1
+
+1:	andcc		%o2, 0x4, %g0
+	be,pt		%icc, 1f
+	 nop
+	EX_LD(LOAD(lduw, %o1, %o5))
+	EX_ST(STORE(stw, %o5, %o1 + %o3))
+	add		%o1, 0x4, %o1
+
+1:	andcc		%o2, 0x2, %g0
+	be,pt		%icc, 1f
+	 nop
+	EX_LD(LOAD(lduh, %o1, %o5))
+	EX_ST(STORE(sth, %o5, %o1 + %o3))
+	add		%o1, 0x2, %o1
+
+1:	andcc		%o2, 0x1, %g0
+	be,pt		%icc, 85f
+	 nop
+	EX_LD(LOAD(ldub, %o1, %o5))
+	ba,pt		%xcc, 85f
+	 EX_ST(STORE(stb, %o5, %o1 + %o3))
+
+	.align		64
+70: /* 16 < len <= (3 * 64) */
+	bne,pn		%XCC, 75f
+	 sub		%o0, %o1, %o3
+
+72:
+	andn		%o2, 0xf, GLOBAL_SPARE
+	and		%o2, 0xf, %o2
+1:	subcc		GLOBAL_SPARE, 0x10, GLOBAL_SPARE
+	EX_LD(LOAD(ldx, %o1 + 0x00, %o5))
+	EX_LD(LOAD(ldx, %o1 + 0x08, %g1))
+	EX_ST(STORE(stx, %o5, %o1 + %o3))
+	add		%o1, 0x8, %o1
+	EX_ST(STORE(stx, %g1, %o1 + %o3))
+	bgu,pt		%XCC, 1b
+	 add		%o1, 0x8, %o1
+73:	andcc		%o2, 0x8, %g0
+	be,pt		%XCC, 1f
+	 nop
+	sub		%o2, 0x8, %o2
+	EX_LD(LOAD(ldx, %o1, %o5))
+	EX_ST(STORE(stx, %o5, %o1 + %o3))
+	add		%o1, 0x8, %o1
+1:	andcc		%o2, 0x4, %g0
+	be,pt		%XCC, 1f
+	 nop
+	sub		%o2, 0x4, %o2
+	EX_LD(LOAD(lduw, %o1, %o5))
+	EX_ST(STORE(stw, %o5, %o1 + %o3))
+	add		%o1, 0x4, %o1
+1:	cmp		%o2, 0
+	be,pt		%XCC, 85f
+	 nop
+	ba,pt		%xcc, 90f
+	 nop
+
+75:
+	andcc		%o0, 0x7, %g1
+	sub		%g1, 0x8, %g1
+	be,pn		%icc, 2f
+	 sub		%g0, %g1, %g1
+	sub		%o2, %g1, %o2
+
+1:	subcc		%g1, 1, %g1
+	EX_LD(LOAD(ldub, %o1, %o5))
+	EX_ST(STORE(stb, %o5, %o1 + %o3))
+	bgu,pt		%icc, 1b
+	 add		%o1, 1, %o1
+
+2:	add		%o1, %o3, %o0
+	andcc		%o1, 0x7, %g1
+	bne,pt		%icc, 8f
+	 sll		%g1, 3, %g1
+
+	cmp		%o2, 16
+	bgeu,pt		%icc, 72b
+	 nop
+	ba,a,pt		%xcc, 73b
+
+8:	mov		64, %o3
+	andn		%o1, 0x7, %o1
+	EX_LD(LOAD(ldx, %o1, %g2))
+	sub		%o3, %g1, %o3
+	andn		%o2, 0x7, GLOBAL_SPARE
+	sllx		%g2, %g1, %g2
+1:	EX_LD(LOAD(ldx, %o1 + 0x8, %g3))
+	subcc		GLOBAL_SPARE, 0x8, GLOBAL_SPARE
+	add		%o1, 0x8, %o1
+	srlx		%g3, %o3, %o5
+	or		%o5, %g2, %o5
+	EX_ST(STORE(stx, %o5, %o0))
+	add		%o0, 0x8, %o0
+	bgu,pt		%icc, 1b
+	 sllx		%g3, %g1, %g2
+
+	srl		%g1, 3, %g1
+	andcc		%o2, 0x7, %o2
+	be,pn		%icc, 85f
+	 add		%o1, %g1, %o1
+	ba,pt		%xcc, 90f
+	 sub		%o0, %o1, %o3
+
+	.align		64
+80: /* 0 < len <= 16 */
+	andcc		%o3, 0x3, %g0
+	bne,pn		%XCC, 90f
+	 sub		%o0, %o1, %o3
+
+1:
+	subcc		%o2, 4, %o2
+	EX_LD(LOAD(lduw, %o1, %g1))
+	EX_ST(STORE(stw, %g1, %o1 + %o3))
+	bgu,pt		%XCC, 1b
+	 add		%o1, 4, %o1
+
+85:	retl
+	 mov		EX_RETVAL(%o4), %o0
+
+	.align		32
+90:
+	subcc		%o2, 1, %o2
+	EX_LD(LOAD(ldub, %o1, %g1))
+	EX_ST(STORE(stb, %g1, %o1 + %o3))
+	bgu,pt		%XCC, 90b
+	 add		%o1, 1, %o1
+	retl
+	 mov		EX_RETVAL(%o4), %o0
+
+	.size		FUNC_NAME, .-FUNC_NAME
diff --git a/arch/sparc64/lib/U3patch.S b/arch/sparc64/lib/U3patch.S
new file mode 100644
index 0000000..e2b6c5e
--- /dev/null
+++ b/arch/sparc64/lib/U3patch.S
@@ -0,0 +1,32 @@
+/* U3patch.S: Patch Ultra-I routines with Ultra-III variant.
+ *
+ * Copyright (C) 2004 David S. Miller <davem@redhat.com>
+ */
+
+#define BRANCH_ALWAYS	0x10680000
+#define NOP		0x01000000
+#define ULTRA3_DO_PATCH(OLD, NEW)	\
+	sethi	%hi(NEW), %g1; \
+	or	%g1, %lo(NEW), %g1; \
+	sethi	%hi(OLD), %g2; \
+	or	%g2, %lo(OLD), %g2; \
+	sub	%g1, %g2, %g1; \
+	sethi	%hi(BRANCH_ALWAYS), %g3; \
+	srl	%g1, 2, %g1; \
+	or	%g3, %lo(BRANCH_ALWAYS), %g3; \
+	or	%g3, %g1, %g3; \
+	stw	%g3, [%g2]; \
+	sethi	%hi(NOP), %g3; \
+	or	%g3, %lo(NOP), %g3; \
+	stw	%g3, [%g2 + 0x4]; \
+	flush	%g2;
+
+	.globl	cheetah_patch_copyops
+	.type	cheetah_patch_copyops,#function
+cheetah_patch_copyops:
+	ULTRA3_DO_PATCH(memcpy, U3memcpy)
+	ULTRA3_DO_PATCH(___copy_from_user, U3copy_from_user)
+	ULTRA3_DO_PATCH(___copy_to_user, U3copy_to_user)
+	retl
+	 nop
+	.size	cheetah_patch_copyops,.-cheetah_patch_copyops
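
cheetah_patch_copyops above overwrites the first two instructions of each
generic routine with a branch to the UltraSPARC-III version plus a nop in
the delay slot.  A hedged C sketch of how the branch word is assembled (the
0x10680000 branch-always template and the word-granular displacement come
straight from the macro; the I-cache flush the kernel performs is omitted):

  #include <stdint.h>

  #define BRANCH_ALWAYS   0x10680000u     /* branch-always instruction template */
  #define NOP             0x01000000u

  /* Build the two words ULTRA3_DO_PATCH() stores over 'old': the byte
   * displacement (new - old) is converted to instruction words with the
   * >> 2 and OR'd into the branch opcode's displacement field.  This
   * assumes the displacement fits in that field, as it does for the
   * routines patched here.
   */
  static void make_patch(uint32_t insn[2], uintptr_t old, uintptr_t new_)
  {
          insn[0] = BRANCH_ALWAYS | ((uint32_t)(new_ - old) >> 2);
          insn[1] = NOP;
  }
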
diff --git a/arch/sparc64/lib/VISsave.S b/arch/sparc64/lib/VISsave.S
new file mode 100644
index 0000000..65e328d
--- /dev/null
+++ b/arch/sparc64/lib/VISsave.S
@@ -0,0 +1,131 @@
+/* $Id: VISsave.S,v 1.6 2002/02/09 19:49:30 davem Exp $
+ * VISsave.S: Code for saving FPU register state for
+ *            VIS routines. One should not call this directly,
+ *            but use macros provided in <asm/visasm.h>.
+ *
+ * Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
+ */
+
+#include <asm/asi.h>
+#include <asm/page.h>
+#include <asm/ptrace.h>
+#include <asm/visasm.h>
+#include <asm/thread_info.h>
+
+	.text
+	.globl		VISenter, VISenterhalf
+
+	/* On entry: %o5=current FPRS value, %g7 is caller's address */
+	/* May clobber %o5, %g1, %g2, %g3, %g7, %icc, %xcc */
+
+	/* Nothing special needs to be done here to handle preemption; this
+	 * FPU save/restore mechanism is already preemption safe.
+	 */
+
+	.align		32
+VISenter:
+	ldub		[%g6 + TI_FPDEPTH], %g1
+	brnz,a,pn	%g1, 1f
+	 cmp		%g1, 1
+	stb		%g0, [%g6 + TI_FPSAVED]
+	stx		%fsr, [%g6 + TI_XFSR]
+9:	jmpl		%g7 + %g0, %g0
+	 nop
+1:	bne,pn		%icc, 2f
+
+	 srl		%g1, 1, %g1
+vis1:	ldub		[%g6 + TI_FPSAVED], %g3
+	stx		%fsr, [%g6 + TI_XFSR]
+	or		%g3, %o5, %g3
+	stb		%g3, [%g6 + TI_FPSAVED]
+	rd		%gsr, %g3
+	clr		%g1
+	ba,pt		%xcc, 3f
+
+	 stx		%g3, [%g6 + TI_GSR]
+2:	add		%g6, %g1, %g3
+	cmp		%o5, FPRS_DU
+	be,pn		%icc, 6f
+	 sll		%g1, 3, %g1
+	stb		%o5, [%g3 + TI_FPSAVED]
+	rd		%gsr, %g2
+	add		%g6, %g1, %g3
+	stx		%g2, [%g3 + TI_GSR]
+
+	add		%g6, %g1, %g2
+	stx		%fsr, [%g2 + TI_XFSR]
+	sll		%g1, 5, %g1
+3:	andcc		%o5, FPRS_DL|FPRS_DU, %g0
+	be,pn		%icc, 9b
+	 add		%g6, TI_FPREGS, %g2
+	andcc		%o5, FPRS_DL, %g0
+	membar		#StoreStore | #LoadStore
+
+	be,pn		%icc, 4f
+	 add		%g6, TI_FPREGS+0x40, %g3
+	stda		%f0, [%g2 + %g1] ASI_BLK_P
+	stda		%f16, [%g3 + %g1] ASI_BLK_P
+	andcc		%o5, FPRS_DU, %g0
+	be,pn		%icc, 5f
+4:	 add		%g1, 128, %g1
+	stda		%f32, [%g2 + %g1] ASI_BLK_P
+
+	stda		%f48, [%g3 + %g1] ASI_BLK_P
+5:	membar		#Sync
+	jmpl		%g7 + %g0, %g0
+	 nop
+
+6:	ldub		[%g3 + TI_FPSAVED], %o5
+	or		%o5, FPRS_DU, %o5
+	add		%g6, TI_FPREGS+0x80, %g2
+	stb		%o5, [%g3 + TI_FPSAVED]
+
+	sll		%g1, 5, %g1
+	add		%g6, TI_FPREGS+0xc0, %g3
+	wr		%g0, FPRS_FEF, %fprs
+	membar		#StoreStore | #LoadStore
+	stda		%f32, [%g2 + %g1] ASI_BLK_P
+	stda		%f48, [%g3 + %g1] ASI_BLK_P
+	membar		#Sync
+	jmpl		%g7 + %g0, %g0
+
+	 nop
+
+	.align		32
+VISenterhalf:
+	ldub		[%g6 + TI_FPDEPTH], %g1
+	brnz,a,pn	%g1, 1f
+	 cmp		%g1, 1
+	stb		%g0, [%g6 + TI_FPSAVED]
+	stx		%fsr, [%g6 + TI_XFSR]
+	clr		%o5
+	jmpl		%g7 + %g0, %g0
+	 wr		%g0, FPRS_FEF, %fprs
+
+1:	bne,pn		%icc, 2f
+	 srl		%g1, 1, %g1
+	ba,pt		%xcc, vis1
+	 sub		%g7, 8, %g7
+2:	addcc		%g6, %g1, %g3
+	sll		%g1, 3, %g1
+	andn		%o5, FPRS_DU, %g2
+	stb		%g2, [%g3 + TI_FPSAVED]
+
+	rd		%gsr, %g2
+	add		%g6, %g1, %g3
+	stx		%g2, [%g3 + TI_GSR]
+	add		%g6, %g1, %g2
+	stx		%fsr, [%g2 + TI_XFSR]
+	sll		%g1, 5, %g1
+3:	andcc		%o5, FPRS_DL, %g0
+	be,pn		%icc, 4f
+	 add		%g6, TI_FPREGS, %g2
+
+	membar		#StoreStore | #LoadStore
+	add		%g6, TI_FPREGS+0x40, %g3
+	stda		%f0, [%g2 + %g1] ASI_BLK_P
+	stda		%f16, [%g3 + %g1] ASI_BLK_P
+	membar		#Sync
+4:	and		%o5, FPRS_DU, %o5
+	jmpl		%g7 + %g0, %g0
+	 wr		%o5, FPRS_FEF, %fprs
diff --git a/arch/sparc64/lib/atomic.S b/arch/sparc64/lib/atomic.S
new file mode 100644
index 0000000..e528b8d
--- /dev/null
+++ b/arch/sparc64/lib/atomic.S
@@ -0,0 +1,139 @@
+/* $Id: atomic.S,v 1.4 2001/11/18 00:12:56 davem Exp $
+ * atomic.S: These things are too big to do inline.
+ *
+ * Copyright (C) 1999 David S. Miller (davem@redhat.com)
+ */
+
+#include <linux/config.h>
+#include <asm/asi.h>
+
+	/* On SMP we need to use memory barriers to ensure
+	 * correct memory operation ordering; nop these out
+	 * for uniprocessor.
+	 */
+#ifdef CONFIG_SMP
+#define ATOMIC_PRE_BARRIER	membar #StoreLoad | #LoadLoad
+#define ATOMIC_POST_BARRIER	membar #StoreLoad | #StoreStore
+#else
+#define ATOMIC_PRE_BARRIER	nop
+#define ATOMIC_POST_BARRIER	nop
+#endif
+
+	.text
+
+	/* Two versions of the atomic routines, one that
+	 * does not return a value and does not perform
+	 * memory barriers, and a second which returns
+	 * a value and does the barriers.
+	 */
+	.globl	atomic_add
+	.type	atomic_add,#function
+atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
+1:	lduw	[%o1], %g1
+	add	%g1, %o0, %g7
+	cas	[%o1], %g1, %g7
+	cmp	%g1, %g7
+	bne,pn	%icc, 1b
+	 nop
+	retl
+	 nop
+	.size	atomic_add, .-atomic_add
+
+	.globl	atomic_sub
+	.type	atomic_sub,#function
+atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
+1:	lduw	[%o1], %g1
+	sub	%g1, %o0, %g7
+	cas	[%o1], %g1, %g7
+	cmp	%g1, %g7
+	bne,pn	%icc, 1b
+	 nop
+	retl
+	 nop
+	.size	atomic_sub, .-atomic_sub
+
+	.globl	atomic_add_ret
+	.type	atomic_add_ret,#function
+atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
+	ATOMIC_PRE_BARRIER
+1:	lduw	[%o1], %g1
+	add	%g1, %o0, %g7
+	cas	[%o1], %g1, %g7
+	cmp	%g1, %g7
+	bne,pn	%icc, 1b
+	 add	%g7, %o0, %g7
+	ATOMIC_POST_BARRIER
+	retl
+	 sra	%g7, 0, %o0
+	.size	atomic_add_ret, .-atomic_add_ret
+
+	.globl	atomic_sub_ret
+	.type	atomic_sub_ret,#function
+atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
+	ATOMIC_PRE_BARRIER
+1:	lduw	[%o1], %g1
+	sub	%g1, %o0, %g7
+	cas	[%o1], %g1, %g7
+	cmp	%g1, %g7
+	bne,pn	%icc, 1b
+	 sub	%g7, %o0, %g7
+	ATOMIC_POST_BARRIER
+	retl
+	 sra	%g7, 0, %o0
+	.size	atomic_sub_ret, .-atomic_sub_ret
+
+	.globl	atomic64_add
+	.type	atomic64_add,#function
+atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
+1:	ldx	[%o1], %g1
+	add	%g1, %o0, %g7
+	casx	[%o1], %g1, %g7
+	cmp	%g1, %g7
+	bne,pn	%xcc, 1b
+	 nop
+	retl
+	 nop
+	.size	atomic64_add, .-atomic64_add
+
+	.globl	atomic64_sub
+	.type	atomic64_sub,#function
+atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
+1:	ldx	[%o1], %g1
+	sub	%g1, %o0, %g7
+	casx	[%o1], %g1, %g7
+	cmp	%g1, %g7
+	bne,pn	%xcc, 1b
+	 nop
+	retl
+	 nop
+	.size	atomic64_sub, .-atomic64_sub
+
+	.globl	atomic64_add_ret
+	.type	atomic64_add_ret,#function
+atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
+	ATOMIC_PRE_BARRIER
+1:	ldx	[%o1], %g1
+	add	%g1, %o0, %g7
+	casx	[%o1], %g1, %g7
+	cmp	%g1, %g7
+	bne,pn	%xcc, 1b
+	 add	%g7, %o0, %g7
+	ATOMIC_POST_BARRIER
+	retl
+	 mov	%g7, %o0
+	.size	atomic64_add_ret, .-atomic64_add_ret
+
+	.globl	atomic64_sub_ret
+	.type	atomic64_sub_ret,#function
+atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
+	ATOMIC_PRE_BARRIER
+1:	ldx	[%o1], %g1
+	sub	%g1, %o0, %g7
+	casx	[%o1], %g1, %g7
+	cmp	%g1, %g7
+	bne,pn	%xcc, 1b
+	 sub	%g7, %o0, %g7
+	ATOMIC_POST_BARRIER
+	retl
+	 mov	%g7, %o0
+	.size	atomic64_sub_ret, .-atomic64_sub_ret
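
Every routine in atomic.S above follows the same pattern: load the old value,
compute the new one, try to install it with cas/casx, and retry if another
cpu got there first.  A rough C rendering of atomic_add_ret() using GCC's
__sync_val_compare_and_swap builtin in place of the cas instruction (a sketch
for illustration, not the kernel implementation; the builtin also implies the
barriers that ATOMIC_PRE_BARRIER/ATOMIC_POST_BARRIER provide on SMP):

  /* Retry until the compare-and-swap observes no concurrent update,
   * then return the incremented value, like atomic_add_ret above.
   */
  static int atomic_add_ret_sketch(int i, int *v)
  {
          int old, seen;

          do {
                  old = *v;                               /* lduw [%o1], %g1      */
                  seen = __sync_val_compare_and_swap(v, old, old + i);
                                                          /* cas  [%o1], %g1, %g7 */
          } while (seen != old);                          /* bne,pn %icc, 1b      */

          return old + i;                                 /* add; sra; return     */
  }
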
diff --git a/arch/sparc64/lib/bitops.S b/arch/sparc64/lib/bitops.S
new file mode 100644
index 0000000..886dcd2
--- /dev/null
+++ b/arch/sparc64/lib/bitops.S
@@ -0,0 +1,145 @@
+/* $Id: bitops.S,v 1.3 2001/11/18 00:12:56 davem Exp $
+ * bitops.S: Sparc64 atomic bit operations.
+ *
+ * Copyright (C) 2000 David S. Miller (davem@redhat.com)
+ */
+
+#include <linux/config.h>
+#include <asm/asi.h>
+
+	/* On SMP we need to use memory barriers to ensure
+	 * correct memory operation ordering, nop these out
+	 * for uniprocessor.
+	 */
+#ifdef CONFIG_SMP
+#define BITOP_PRE_BARRIER	membar #StoreLoad | #LoadLoad
+#define BITOP_POST_BARRIER	membar #StoreLoad | #StoreStore
+#else
+#define BITOP_PRE_BARRIER	nop
+#define BITOP_POST_BARRIER	nop
+#endif
+
+	.text
+
+	.globl	test_and_set_bit
+	.type	test_and_set_bit,#function
+test_and_set_bit:	/* %o0=nr, %o1=addr */
+	BITOP_PRE_BARRIER
+	srlx	%o0, 6, %g1
+	mov	1, %o2
+	sllx	%g1, 3, %g3
+	and	%o0, 63, %g2
+	sllx	%o2, %g2, %o2
+	add	%o1, %g3, %o1
+1:	ldx	[%o1], %g7
+	or	%g7, %o2, %g1
+	casx	[%o1], %g7, %g1
+	cmp	%g7, %g1
+	bne,pn	%xcc, 1b
+	 and	%g7, %o2, %g2
+	BITOP_POST_BARRIER
+	clr	%o0
+	retl
+	 movrne	%g2, 1, %o0
+	.size	test_and_set_bit, .-test_and_set_bit
+
+	.globl	test_and_clear_bit
+	.type	test_and_clear_bit,#function
+test_and_clear_bit:	/* %o0=nr, %o1=addr */
+	BITOP_PRE_BARRIER
+	srlx	%o0, 6, %g1
+	mov	1, %o2
+	sllx	%g1, 3, %g3
+	and	%o0, 63, %g2
+	sllx	%o2, %g2, %o2
+	add	%o1, %g3, %o1
+1:	ldx	[%o1], %g7
+	andn	%g7, %o2, %g1
+	casx	[%o1], %g7, %g1
+	cmp	%g7, %g1
+	bne,pn	%xcc, 1b
+	 and	%g7, %o2, %g2
+	BITOP_POST_BARRIER
+	clr	%o0
+	retl
+	 movrne	%g2, 1, %o0
+	.size	test_and_clear_bit, .-test_and_clear_bit
+
+	.globl	test_and_change_bit
+	.type	test_and_change_bit,#function
+test_and_change_bit:	/* %o0=nr, %o1=addr */
+	BITOP_PRE_BARRIER
+	srlx	%o0, 6, %g1
+	mov	1, %o2
+	sllx	%g1, 3, %g3
+	and	%o0, 63, %g2
+	sllx	%o2, %g2, %o2
+	add	%o1, %g3, %o1
+1:	ldx	[%o1], %g7
+	xor	%g7, %o2, %g1
+	casx	[%o1], %g7, %g1
+	cmp	%g7, %g1
+	bne,pn	%xcc, 1b
+	 and	%g7, %o2, %g2
+	BITOP_POST_BARRIER
+	clr	%o0
+	retl
+	 movrne	%g2, 1, %o0
+	.size	test_and_change_bit, .-test_and_change_bit
+
+	.globl	set_bit
+	.type	set_bit,#function
+set_bit:		/* %o0=nr, %o1=addr */
+	srlx	%o0, 6, %g1
+	mov	1, %o2
+	sllx	%g1, 3, %g3
+	and	%o0, 63, %g2
+	sllx	%o2, %g2, %o2
+	add	%o1, %g3, %o1
+1:	ldx	[%o1], %g7
+	or	%g7, %o2, %g1
+	casx	[%o1], %g7, %g1
+	cmp	%g7, %g1
+	bne,pn	%xcc, 1b
+	 nop
+	retl
+	 nop
+	.size	set_bit, .-set_bit
+
+	.globl	clear_bit
+	.type	clear_bit,#function
+clear_bit:		/* %o0=nr, %o1=addr */
+	srlx	%o0, 6, %g1
+	mov	1, %o2
+	sllx	%g1, 3, %g3
+	and	%o0, 63, %g2
+	sllx	%o2, %g2, %o2
+	add	%o1, %g3, %o1
+1:	ldx	[%o1], %g7
+	andn	%g7, %o2, %g1
+	casx	[%o1], %g7, %g1
+	cmp	%g7, %g1
+	bne,pn	%xcc, 1b
+	 nop
+	retl
+	 nop
+	.size	clear_bit, .-clear_bit
+
+	.globl	change_bit
+	.type	change_bit,#function
+change_bit:		/* %o0=nr, %o1=addr */
+	srlx	%o0, 6, %g1
+	mov	1, %o2
+	sllx	%g1, 3, %g3
+	and	%o0, 63, %g2
+	sllx	%o2, %g2, %o2
+	add	%o1, %g3, %o1
+1:	ldx	[%o1], %g7
+	xor	%g7, %o2, %g1
+	casx	[%o1], %g7, %g1
+	cmp	%g7, %g1
+	bne,pn	%xcc, 1b
+	 nop
+	retl
+	 nop
+	.size	change_bit, .-change_bit
diff --git a/arch/sparc64/lib/bzero.S b/arch/sparc64/lib/bzero.S
new file mode 100644
index 0000000..21a933f
--- /dev/null
+++ b/arch/sparc64/lib/bzero.S
@@ -0,0 +1,158 @@
+/* bzero.S: Simple prefetching memset, bzero, and clear_user
+ *          implementations.
+ *
+ * Copyright (C) 2005 David S. Miller <davem@davemloft.net>
+ */
+
+	.text
+
+	.globl	__memset
+	.type	__memset, #function
+__memset:		/* %o0=buf, %o1=pat, %o2=len */
+
+	.globl	memset
+	.type	memset, #function
+memset:			/* %o0=buf, %o1=pat, %o2=len */
+	and		%o1, 0xff, %o3
+	mov		%o2, %o1
+	sllx		%o3, 8, %g1
+	or		%g1, %o3, %o2
+	sllx		%o2, 16, %g1
+	or		%g1, %o2, %o2
+	sllx		%o2, 32, %g1
+	ba,pt		%xcc, 1f
+	 or		%g1, %o2, %o2
+
+	.globl	__bzero
+	.type	__bzero, #function
+__bzero:		/* %o0=buf, %o1=len */
+	clr		%o2
+1:	mov		%o0, %o3
+	brz,pn		%o1, __bzero_done
+	 cmp		%o1, 16
+	bl,pn		%icc, __bzero_tiny
+	 prefetch	[%o0 + 0x000], #n_writes
+	andcc		%o0, 0x3, %g0
+	be,pt		%icc, 2f
+1:	 stb		%o2, [%o0 + 0x00]
+	add		%o0, 1, %o0
+	andcc		%o0, 0x3, %g0
+	bne,pn		%icc, 1b
+	 sub		%o1, 1, %o1
+2:	andcc		%o0, 0x7, %g0
+	be,pt		%icc, 3f
+	 stw		%o2, [%o0 + 0x00]
+	sub		%o1, 4, %o1
+	add		%o0, 4, %o0
+3:	and		%o1, 0x38, %g1
+	cmp		%o1, 0x40
+	andn		%o1, 0x3f, %o4
+	bl,pn		%icc, 5f
+	 and		%o1, 0x7, %o1
+	prefetch	[%o0 + 0x040], #n_writes
+	prefetch	[%o0 + 0x080], #n_writes
+	prefetch	[%o0 + 0x0c0], #n_writes
+	prefetch	[%o0 + 0x100], #n_writes
+	prefetch	[%o0 + 0x140], #n_writes
+4:	prefetch	[%o0 + 0x180], #n_writes
+	stx		%o2, [%o0 + 0x00]
+	stx		%o2, [%o0 + 0x08]
+	stx		%o2, [%o0 + 0x10]
+	stx		%o2, [%o0 + 0x18]
+	stx		%o2, [%o0 + 0x20]
+	stx		%o2, [%o0 + 0x28]
+	stx		%o2, [%o0 + 0x30]
+	stx		%o2, [%o0 + 0x38]
+	subcc		%o4, 0x40, %o4
+	bne,pt		%icc, 4b
+	 add		%o0, 0x40, %o0
+	brz,pn		%g1, 6f
+	 nop
+5:	stx		%o2, [%o0 + 0x00]
+	subcc		%g1, 8, %g1
+	bne,pt		%icc, 5b
+	 add		%o0, 0x8, %o0
+6:	brz,pt		%o1, __bzero_done
+	 nop
+__bzero_tiny:
+1:	stb		%o2, [%o0 + 0x00]
+	subcc		%o1, 1, %o1
+	bne,pt		%icc, 1b
+	 add		%o0, 1, %o0
+__bzero_done:
+	retl
+	 mov		%o3, %o0
+	.size		__bzero, .-__bzero
+	.size		__memset, .-__memset
+	.size		memset, .-memset
+
+#define EX_ST(x,y)		\
+98:	x,y;			\
+	.section .fixup;	\
+	.align 4;		\
+99:	retl;			\
+	 mov	%o1, %o0;	\
+	.section __ex_table;	\
+	.align 4;		\
+	.word 98b, 99b;		\
+	.text;			\
+	.align 4;
+
+	.globl	__bzero_noasi
+	.type	__bzero_noasi, #function
+__bzero_noasi:		/* %o0=buf, %o1=len */
+	brz,pn		%o1, __bzero_noasi_done
+	 cmp		%o1, 16
+	bl,pn		%icc, __bzero_noasi_tiny
+	 EX_ST(prefetcha [%o0 + 0x00] %asi, #n_writes)
+	andcc		%o0, 0x3, %g0
+	be,pt		%icc, 2f
+1:	 EX_ST(stba	%g0, [%o0 + 0x00] %asi)
+	add		%o0, 1, %o0
+	andcc		%o0, 0x3, %g0
+	bne,pn		%icc, 1b
+	 sub		%o1, 1, %o1
+2:	andcc		%o0, 0x7, %g0
+	be,pt		%icc, 3f
+	 EX_ST(stwa	%g0, [%o0 + 0x00] %asi)
+	sub		%o1, 4, %o1
+	add		%o0, 4, %o0
+3:	and		%o1, 0x38, %g1
+	cmp		%o1, 0x40
+	andn		%o1, 0x3f, %o4
+	bl,pn		%icc, 5f
+	 and		%o1, 0x7, %o1
+	EX_ST(prefetcha	[%o0 + 0x040] %asi, #n_writes)
+	EX_ST(prefetcha	[%o0 + 0x080] %asi, #n_writes)
+	EX_ST(prefetcha	[%o0 + 0x0c0] %asi, #n_writes)
+	EX_ST(prefetcha	[%o0 + 0x100] %asi, #n_writes)
+	EX_ST(prefetcha	[%o0 + 0x140] %asi, #n_writes)
+4:	EX_ST(prefetcha	[%o0 + 0x180] %asi, #n_writes)
+	EX_ST(stxa	%g0, [%o0 + 0x00] %asi)
+	EX_ST(stxa	%g0, [%o0 + 0x08] %asi)
+	EX_ST(stxa	%g0, [%o0 + 0x10] %asi)
+	EX_ST(stxa	%g0, [%o0 + 0x18] %asi)
+	EX_ST(stxa	%g0, [%o0 + 0x20] %asi)
+	EX_ST(stxa	%g0, [%o0 + 0x28] %asi)
+	EX_ST(stxa	%g0, [%o0 + 0x30] %asi)
+	EX_ST(stxa	%g0, [%o0 + 0x38] %asi)
+	subcc		%o4, 0x40, %o4
+	bne,pt		%icc, 4b
+	 add		%o0, 0x40, %o0
+	brz,pn		%g1, 6f
+	 nop
+5:	EX_ST(stxa	%g0, [%o0 + 0x00] %asi)
+	subcc		%g1, 8, %g1
+	bne,pt		%icc, 5b
+	 add		%o0, 0x8, %o0
+6:	brz,pt		%o1, __bzero_noasi_done
+	 nop
+__bzero_noasi_tiny:
+1:	EX_ST(stba	%g0, [%o0 + 0x00] %asi)
+	subcc		%o1, 1, %o1
+	bne,pt		%icc, 1b
+	 add		%o0, 1, %o0
+__bzero_noasi_done:
+	retl
+	 clr		%o0
+	.size		__bzero_noasi, .-__bzero_noasi
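
The first few instructions of memset above smear the low byte of the pattern
across all eight bytes of a 64-bit word so that the main loop can issue
8-byte stx stores.  The same replication written out in C, for reference
(a sketch of the shift/or sequence only):

  #include <stdint.h>

  static uint64_t replicate_byte(uint64_t pat)
  {
          uint64_t v = pat & 0xff;        /* and  %o1, 0xff, %o3              */

          v |= v << 8;                    /* sllx 8;  or -> 16 bits of pattern */
          v |= v << 16;                   /* sllx 16; or -> 32 bits            */
          v |= v << 32;                   /* sllx 32; or -> 64 bits            */
          return v;
  }
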
diff --git a/arch/sparc64/lib/checksum.S b/arch/sparc64/lib/checksum.S
new file mode 100644
index 0000000..ba9cd3c
--- /dev/null
+++ b/arch/sparc64/lib/checksum.S
@@ -0,0 +1,172 @@
+/* checksum.S: Sparc V9 optimized checksum code.
+ *
+ *  Copyright(C) 1995 Linus Torvalds
+ *  Copyright(C) 1995 Miguel de Icaza
+ *  Copyright(C) 1996, 2000 David S. Miller
+ *  Copyright(C) 1997 Jakub Jelinek
+ *
+ * derived from:
+ *	Linux/Alpha checksum c-code
+ *      Linux/ix86 inline checksum assembly
+ *      RFC1071 Computing the Internet Checksum (esp. Jacobson's m68k code)
+ *	David Mosberger-Tang for optimized reference c-code
+ *	BSD4.4 portable checksum routine
+ */
+
+	.text
+
+csum_partial_fix_alignment:
+	/* We checked for zero length already, so there must be
+	 * at least one byte.
+	 */
+	be,pt		%icc, 1f
+	 nop
+	ldub		[%o0 + 0x00], %o4
+	add		%o0, 1, %o0
+	sub		%o1, 1, %o1
+1:	andcc		%o0, 0x2, %g0
+	be,pn		%icc, csum_partial_post_align
+	 cmp		%o1, 2
+	blu,pn		%icc, csum_partial_end_cruft
+	 nop
+	lduh		[%o0 + 0x00], %o5
+	add		%o0, 2, %o0
+	sub		%o1, 2, %o1
+	ba,pt		%xcc, csum_partial_post_align
+	 add		%o5, %o4, %o4
+
+	.align		32
+	.globl		csum_partial
+csum_partial:		/* %o0=buff, %o1=len, %o2=sum */
+	prefetch	[%o0 + 0x000], #n_reads
+	clr		%o4
+	prefetch	[%o0 + 0x040], #n_reads
+	brz,pn		%o1, csum_partial_finish
+	 andcc		%o0, 0x3, %g0
+
+	/* We "remember" in %g7 whether the lowest bit of the address
+	 * was set, because if it was we have to swap the upper and
+	 * lower 8-bit halves of the sum we calculate.
+	 */
+	bne,pn		%icc, csum_partial_fix_alignment
+	 andcc		%o0, 0x1, %g7
+
+csum_partial_post_align:
+	prefetch	[%o0 + 0x080], #n_reads
+	andncc		%o1, 0x3f, %o3
+
+	prefetch	[%o0 + 0x0c0], #n_reads
+	sub		%o1, %o3, %o1
+	brz,pn		%o3, 2f
+	 prefetch	[%o0 + 0x100], #n_reads
+
+	/* So that we don't need to use the non-pairing
+	 * add-with-carry instructions, we accumulate 32-bit
+	 * values into a 64-bit register.  At the end of the
+	 * loop we fold it down to 32 bits and then to 16 bits.
+	 */
+	prefetch	[%o0 + 0x140], #n_reads
+1:	lduw		[%o0 + 0x00], %o5
+	lduw		[%o0 + 0x04], %g1
+	lduw		[%o0 + 0x08], %g2
+	add		%o4, %o5, %o4
+	lduw		[%o0 + 0x0c], %g3
+	add		%o4, %g1, %o4
+	lduw		[%o0 + 0x10], %o5
+	add		%o4, %g2, %o4
+	lduw		[%o0 + 0x14], %g1
+	add		%o4, %g3, %o4
+	lduw		[%o0 + 0x18], %g2
+	add		%o4, %o5, %o4
+	lduw		[%o0 + 0x1c], %g3
+	add		%o4, %g1, %o4
+	lduw		[%o0 + 0x20], %o5
+	add		%o4, %g2, %o4
+	lduw		[%o0 + 0x24], %g1
+	add		%o4, %g3, %o4
+	lduw		[%o0 + 0x28], %g2
+	add		%o4, %o5, %o4
+	lduw		[%o0 + 0x2c], %g3
+	add		%o4, %g1, %o4
+	lduw		[%o0 + 0x30], %o5
+	add		%o4, %g2, %o4
+	lduw		[%o0 + 0x34], %g1
+	add		%o4, %g3, %o4
+	lduw		[%o0 + 0x38], %g2
+	add		%o4, %o5, %o4
+	lduw		[%o0 + 0x3c], %g3
+	add		%o4, %g1, %o4
+	prefetch	[%o0 + 0x180], #n_reads
+	add		%o4, %g2, %o4
+	subcc		%o3, 0x40, %o3
+	add		%o0, 0x40, %o0
+	bne,pt		%icc, 1b
+	 add		%o4, %g3, %o4
+
+2:	and		%o1, 0x3c, %o3
+	brz,pn		%o3, 2f
+	 sub		%o1, %o3, %o1
+1:	lduw		[%o0 + 0x00], %o5
+	subcc		%o3, 0x4, %o3
+	add		%o0, 0x4, %o0
+	bne,pt		%icc, 1b
+	 add		%o4, %o5, %o4
+
+2:
+	/* fold 64-->32 */
+	srlx		%o4, 32, %o5
+	srl		%o4, 0, %o4
+	add		%o4, %o5, %o4
+	srlx		%o4, 32, %o5
+	srl		%o4, 0, %o4
+	add		%o4, %o5, %o4
+
+	/* fold 32-->16 */
+	sethi		%hi(0xffff0000), %g1
+	srl		%o4, 16, %o5
+	andn		%o4, %g1, %g2
+	add		%o5, %g2, %o4
+	srl		%o4, 16, %o5
+	andn		%o4, %g1, %g2
+	add		%o5, %g2, %o4
+
+csum_partial_end_cruft:
+	/* %o4 has the 16-bit sum we have calculated so far.  */
+	cmp		%o1, 2
+	blu,pt		%icc, 1f
+	 nop
+	lduh		[%o0 + 0x00], %o5
+	sub		%o1, 2, %o1
+	add		%o0, 2, %o0
+	add		%o4, %o5, %o4
+1:	brz,pt		%o1, 1f
+	 nop
+	ldub		[%o0 + 0x00], %o5
+	sub		%o1, 1, %o1
+	add		%o0, 1, %o0
+	sllx		%o5, 8, %o5
+	add		%o4, %o5, %o4
+1:
+	/* fold 32-->16 */
+	sethi		%hi(0xffff0000), %g1
+	srl		%o4, 16, %o5
+	andn		%o4, %g1, %g2
+	add		%o5, %g2, %o4
+	srl		%o4, 16, %o5
+	andn		%o4, %g1, %g2
+	add		%o5, %g2, %o4
+
+1:	brz,pt		%g7, 1f
+	 nop
+
+	/* We started with an odd byte, byte-swap the result.  */
+	srl		%o4, 8, %o5
+	and		%o4, 0xff, %g1
+	sll		%g1, 8, %g1
+	or		%o5, %g1, %o4
+
+1:	add		%o2, %o4, %o2
+
+csum_partial_finish:
+	retl
+	 mov		%o2, %o0
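
csum_partial above accumulates 32-bit loads into a 64-bit register and then
folds the carries back in, as the "fold 64-->32" and "fold 32-->16" comments
mark.  A compact C sketch of that folding step (illustrative only; each fold
is applied twice so the carry generated by the first add is folded in too):

  #include <stdint.h>

  static uint16_t fold_sum(uint64_t sum)
  {
          /* fold 64 --> 32 */
          sum = (sum >> 32) + (uint32_t)sum;
          sum = (sum >> 32) + (uint32_t)sum;

          /* fold 32 --> 16 */
          sum = (sum >> 16) + (sum & 0xffff);
          sum = (sum >> 16) + (sum & 0xffff);

          return (uint16_t)sum;
  }
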
diff --git a/arch/sparc64/lib/clear_page.S b/arch/sparc64/lib/clear_page.S
new file mode 100644
index 0000000..b59884e
--- /dev/null
+++ b/arch/sparc64/lib/clear_page.S
@@ -0,0 +1,105 @@
+/* clear_page.S: UltraSparc optimized clear page.
+ *
+ * Copyright (C) 1996, 1998, 1999, 2000, 2004 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1997 Jakub Jelinek (jakub@redhat.com)
+ */
+
+#include <asm/visasm.h>
+#include <asm/thread_info.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/spitfire.h>
+
+	/* What we used to do was lock a TLB entry into a specific
+	 * TLB slot, clear the page with interrupts disabled, then
+	 * restore the original TLB entry.  This was great for
+	 * disturbing the TLB as little as possible, but it meant
+	 * we had to keep interrupts disabled for a long time.
+	 *
+	 * Now, we simply use the normal TLB loading mechanism,
+	 * and this makes the cpu choose a slot all by itself.
+	 * Then we do a normal TLB flush on exit.  We need only
+	 * disable preemption during the clear.
+	 */
+
+#define TTE_BITS_TOP	(_PAGE_VALID | _PAGE_SZBITS)
+#define TTE_BITS_BOTTOM	(_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W)
+
+	.text
+
+	.globl		_clear_page
+_clear_page:		/* %o0=dest */
+	ba,pt		%xcc, clear_page_common
+	 clr		%o4
+
+	/* This thing is pretty important; it shows up
+	 * on the profiles via do_anonymous_page().
+	 */
+	.align		32
+	.globl		clear_user_page
+clear_user_page:	/* %o0=dest, %o1=vaddr */
+	lduw		[%g6 + TI_PRE_COUNT], %o2
+	sethi		%uhi(PAGE_OFFSET), %g2
+	sethi		%hi(PAGE_SIZE), %o4
+
+	sllx		%g2, 32, %g2
+	sethi		%uhi(TTE_BITS_TOP), %g3
+
+	sllx		%g3, 32, %g3
+	sub		%o0, %g2, %g1		! paddr
+
+	or		%g3, TTE_BITS_BOTTOM, %g3
+	and		%o1, %o4, %o0		! vaddr D-cache alias bit
+
+	or		%g1, %g3, %g1		! TTE data
+	sethi		%hi(TLBTEMP_BASE), %o3
+
+	add		%o2, 1, %o4
+	add		%o0, %o3, %o0		! TTE vaddr
+
+	/* Disable preemption.  */
+	mov		TLB_TAG_ACCESS, %g3
+	stw		%o4, [%g6 + TI_PRE_COUNT]
+
+	/* Load TLB entry.  */
+	rdpr		%pstate, %o4
+	wrpr		%o4, PSTATE_IE, %pstate
+	stxa		%o0, [%g3] ASI_DMMU
+	stxa		%g1, [%g0] ASI_DTLB_DATA_IN
+	flush		%g6
+	wrpr		%o4, 0x0, %pstate
+
+	mov		1, %o4
+
+clear_page_common:
+	VISEntryHalf
+	membar		#StoreLoad | #StoreStore | #LoadStore
+	fzero		%f0
+	sethi		%hi(PAGE_SIZE/64), %o1
+	mov		%o0, %g1		! remember vaddr for tlbflush
+	fzero		%f2
+	or		%o1, %lo(PAGE_SIZE/64), %o1
+	faddd		%f0, %f2, %f4
+	fmuld		%f0, %f2, %f6
+	faddd		%f0, %f2, %f8
+	fmuld		%f0, %f2, %f10
+
+	faddd		%f0, %f2, %f12
+	fmuld		%f0, %f2, %f14
+1:	stda		%f0, [%o0 + %g0] ASI_BLK_P
+	subcc		%o1, 1, %o1
+	bne,pt		%icc, 1b
+	 add		%o0, 0x40, %o0
+	membar		#Sync
+	VISExitHalf
+
+	brz,pn		%o4, out
+	 nop
+
+	stxa		%g0, [%g1] ASI_DMMU_DEMAP
+	membar		#Sync
+	stw		%o2, [%g6 + TI_PRE_COUNT]
+
+out:	retl
+	 nop
+
diff --git a/arch/sparc64/lib/copy_in_user.S b/arch/sparc64/lib/copy_in_user.S
new file mode 100644
index 0000000..816076c
--- /dev/null
+++ b/arch/sparc64/lib/copy_in_user.S
@@ -0,0 +1,119 @@
+/* copy_in_user.S: Copy from userspace to userspace.
+ *
+ * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
+ */
+
+#include <asm/asi.h>
+
+#define XCC xcc
+
+#define EX(x,y)			\
+98:	x,y;			\
+	.section .fixup;	\
+	.align 4;		\
+99:	retl;			\
+	 mov 1, %o0;		\
+	.section __ex_table;	\
+	.align 4;		\
+	.word 98b, 99b;		\
+	.text;			\
+	.align 4;
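+/* Each EX()-wrapped access pairs its 98: address with the 99: fixup in
+ * __ex_table, so a fault inside the access resumes at the fixup, which
+ * simply returns a non-zero value to signal that the copy did not
+ * complete.
+ */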
+
+	.register	%g2,#scratch
+	.register	%g3,#scratch
+
+	.text
+	.align	32
+
+	/* Don't try to get too fancy here, just keep it nice and
+	 * simple.  This is predominantly used for well-aligned
+	 * small copies in the compat layer.  It is also used
+	 * to copy register windows around during thread cloning.
+	 */
+
+	.globl		___copy_in_user
+	.type		___copy_in_user,#function
+___copy_in_user:	/* %o0=dst, %o1=src, %o2=len */
+	/* Writing to %asi is _expensive_ so we hardcode it.
+	 * Reading %asi to check for KERNEL_DS is comparatively
+	 * cheap.
+	 */
+	rd		%asi, %g1
+	cmp		%g1, ASI_AIUS
+	bne,pn		%icc, memcpy_user_stub
+	 nop
+
+	cmp		%o2, 0
+	be,pn		%XCC, 85f
+	 or		%o0, %o1, %o3
+	cmp		%o2, 16
+	bleu,a,pn	%XCC, 80f
+	 or		%o3, %o2, %o3
+
+	/* 16 < len <= 64 */
+	andcc		%o3, 0x7, %g0
+	bne,pn		%XCC, 90f
+	 sub		%o0, %o1, %o3
+
+	andn		%o2, 0x7, %o4
+	and		%o2, 0x7, %o2
+1:	subcc		%o4, 0x8, %o4
+	EX(ldxa [%o1] %asi, %o5)
+	EX(stxa %o5, [%o1 + %o3] ASI_AIUS)
+	bgu,pt		%XCC, 1b
+	 add		%o1, 0x8, %o1
+	andcc		%o2, 0x4, %g0
+	be,pt		%XCC, 1f
+	 nop
+	sub		%o2, 0x4, %o2
+	EX(lduwa [%o1] %asi, %o5)
+	EX(stwa %o5, [%o1 + %o3] ASI_AIUS)
+	add		%o1, 0x4, %o1
+1:	cmp		%o2, 0
+	be,pt		%XCC, 85f
+	 nop
+	ba,pt		%xcc, 90f
+	 nop
+
+80:	/* 0 < len <= 16 */
+	andcc		%o3, 0x3, %g0
+	bne,pn		%XCC, 90f
+	 sub		%o0, %o1, %o3
+
+82:
+	subcc		%o2, 4, %o2
+	EX(lduwa [%o1] %asi, %g1)
+	EX(stwa %g1, [%o1 + %o3] ASI_AIUS)
+	bgu,pt		%XCC, 82b
+	 add		%o1, 4, %o1
+
+85:	retl
+	 clr		%o0
+
+	.align	32
+90:
+	subcc		%o2, 1, %o2
+	EX(lduba [%o1] %asi, %g1)
+	EX(stba %g1, [%o1 + %o3] ASI_AIUS)
+	bgu,pt		%XCC, 90b
+	 add		%o1, 1, %o1
+	retl
+	 clr		%o0
+
+	.size		___copy_in_user, .-___copy_in_user
+
+	/* Act like copy_{to,in}_user(), i.e. return zero instead
+	 * of the original destination pointer.  This is invoked when
+	 * copy_{to,in}_user() finds that %asi is kernel space.
+	 */
+	.globl		memcpy_user_stub
+	.type		memcpy_user_stub,#function
+memcpy_user_stub:
+	save		%sp, -192, %sp
+	mov		%i0, %o0
+	mov		%i1, %o1
+	call		memcpy
+	 mov		%i2, %o2
+	ret
+	 restore	%g0, %g0, %o0
+	.size		memcpy_user_stub, .-memcpy_user_stub
diff --git a/arch/sparc64/lib/copy_page.S b/arch/sparc64/lib/copy_page.S
new file mode 100644
index 0000000..23ebf2c
--- /dev/null
+++ b/arch/sparc64/lib/copy_page.S
@@ -0,0 +1,242 @@
+/* copy_page.S: UltraSparc optimized copy page.
+ *
+ * Copyright (C) 1996, 1998, 1999, 2000, 2004 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1997 Jakub Jelinek (jakub@redhat.com)
+ */
+
+#include <asm/visasm.h>
+#include <asm/thread_info.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/spitfire.h>
+#include <asm/head.h>
+
+	/* What we used to do was lock a TLB entry into a specific
+	 * TLB slot, copy the page with interrupts disabled, then
+	 * restore the original TLB entry.  This was great for
+	 * disturbing the TLB as little as possible, but it meant
+	 * we had to keep interrupts disabled for a long time.
+	 *
+	 * Now, we simply use the normal TLB loading mechanism,
+	 * and this makes the cpu choose a slot all by itself.
+	 * Then we do a normal TLB flush on exit.  We need only
+	 * disable preemption during the copy.
+	 */
+
+#define TTE_BITS_TOP	(_PAGE_VALID | _PAGE_SZBITS)
+#define TTE_BITS_BOTTOM	(_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W)
+#define	DCACHE_SIZE	(PAGE_SIZE * 2)
+
+#if (PAGE_SHIFT == 13) || (PAGE_SHIFT == 19)
+#define PAGE_SIZE_REM	0x80
+#elif (PAGE_SHIFT == 16) || (PAGE_SHIFT == 22)
+#define PAGE_SIZE_REM	0x100
+#else
+#error Wrong PAGE_SHIFT specified
+#endif
+
+#define TOUCH(reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7)	\
+	fmovd	%reg0, %f48; 	fmovd	%reg1, %f50;		\
+	fmovd	%reg2, %f52; 	fmovd	%reg3, %f54;		\
+	fmovd	%reg4, %f56; 	fmovd	%reg5, %f58;		\
+	fmovd	%reg6, %f60; 	fmovd	%reg7, %f62;
+
+	.text
+
+	.align		32
+	.globl		copy_user_page
+	.type		copy_user_page,#function
+copy_user_page:		/* %o0=dest, %o1=src, %o2=vaddr */
+	lduw		[%g6 + TI_PRE_COUNT], %o4
+	sethi		%uhi(PAGE_OFFSET), %g2
+	sethi		%hi(PAGE_SIZE), %o3
+
+	sllx		%g2, 32, %g2
+	sethi		%uhi(TTE_BITS_TOP), %g3
+
+	sllx		%g3, 32, %g3
+	sub		%o0, %g2, %g1		! dest paddr
+
+	sub		%o1, %g2, %g2		! src paddr
+	or		%g3, TTE_BITS_BOTTOM, %g3
+
+	and		%o2, %o3, %o0		! vaddr D-cache alias bit
+	or		%g1, %g3, %g1		! dest TTE data
+
+	or		%g2, %g3, %g2		! src TTE data
+	sethi		%hi(TLBTEMP_BASE), %o3
+
+	sethi		%hi(DCACHE_SIZE), %o1
+	add		%o0, %o3, %o0		! dest TTE vaddr
+
+	add		%o4, 1, %o2
+	add		%o0, %o1, %o1		! src TTE vaddr
+
+	/* Disable preemption.  */
+	mov		TLB_TAG_ACCESS, %g3
+	stw		%o2, [%g6 + TI_PRE_COUNT]
+
+	/* Load TLB entries.  */
+	rdpr		%pstate, %o2
+	wrpr		%o2, PSTATE_IE, %pstate
+	stxa		%o0, [%g3] ASI_DMMU
+	stxa		%g1, [%g0] ASI_DTLB_DATA_IN
+	membar		#Sync
+	stxa		%o1, [%g3] ASI_DMMU
+	stxa		%g2, [%g0] ASI_DTLB_DATA_IN
+	membar		#Sync
+	wrpr		%o2, 0x0, %pstate
+
+	BRANCH_IF_ANY_CHEETAH(g3,o2,1f)
+	ba,pt		%xcc, 9f
+	 nop
+
+1:
+	VISEntryHalf
+	membar		#StoreLoad | #StoreStore | #LoadStore
+	sethi		%hi((PAGE_SIZE/64)-2), %o2
+	mov		%o0, %g1
+	prefetch	[%o1 + 0x000], #one_read
+	or		%o2, %lo((PAGE_SIZE/64)-2), %o2
+	prefetch	[%o1 + 0x040], #one_read
+	prefetch	[%o1 + 0x080], #one_read
+	prefetch	[%o1 + 0x0c0], #one_read
+	ldd		[%o1 + 0x000], %f0
+	prefetch	[%o1 + 0x100], #one_read
+	ldd		[%o1 + 0x008], %f2
+	prefetch	[%o1 + 0x140], #one_read
+	ldd		[%o1 + 0x010], %f4
+	prefetch	[%o1 + 0x180], #one_read
+	fmovd		%f0, %f16
+	ldd		[%o1 + 0x018], %f6
+	fmovd		%f2, %f18
+	ldd		[%o1 + 0x020], %f8
+	fmovd		%f4, %f20
+	ldd		[%o1 + 0x028], %f10
+	fmovd		%f6, %f22
+	ldd		[%o1 + 0x030], %f12
+	fmovd		%f8, %f24
+	ldd		[%o1 + 0x038], %f14
+	fmovd		%f10, %f26
+	ldd		[%o1 + 0x040], %f0
+1:	ldd		[%o1 + 0x048], %f2
+	fmovd		%f12, %f28
+	ldd		[%o1 + 0x050], %f4
+	fmovd		%f14, %f30
+	stda		%f16, [%o0] ASI_BLK_P
+	ldd		[%o1 + 0x058], %f6
+	fmovd		%f0, %f16
+	ldd		[%o1 + 0x060], %f8
+	fmovd		%f2, %f18
+	ldd		[%o1 + 0x068], %f10
+	fmovd		%f4, %f20
+	ldd		[%o1 + 0x070], %f12
+	fmovd		%f6, %f22
+	ldd		[%o1 + 0x078], %f14
+	fmovd		%f8, %f24
+	ldd		[%o1 + 0x080], %f0
+	prefetch	[%o1 + 0x180], #one_read
+	fmovd		%f10, %f26
+	subcc		%o2, 1, %o2
+	add		%o0, 0x40, %o0
+	bne,pt		%xcc, 1b
+	 add		%o1, 0x40, %o1
+
+	ldd		[%o1 + 0x048], %f2
+	fmovd		%f12, %f28
+	ldd		[%o1 + 0x050], %f4
+	fmovd		%f14, %f30
+	stda		%f16, [%o0] ASI_BLK_P
+	ldd		[%o1 + 0x058], %f6
+	fmovd		%f0, %f16
+	ldd		[%o1 + 0x060], %f8
+	fmovd		%f2, %f18
+	ldd		[%o1 + 0x068], %f10
+	fmovd		%f4, %f20
+	ldd		[%o1 + 0x070], %f12
+	fmovd		%f6, %f22
+	add		%o0, 0x40, %o0
+	ldd		[%o1 + 0x078], %f14
+	fmovd		%f8, %f24
+	fmovd		%f10, %f26
+	fmovd		%f12, %f28
+	fmovd		%f14, %f30
+	stda		%f16, [%o0] ASI_BLK_P
+	membar		#Sync
+	VISExitHalf
+	ba,pt		%xcc, 5f
+	 nop
+
+9:
+	VISEntry
+	ldub		[%g6 + TI_FAULT_CODE], %g3
+	mov		%o0, %g1
+	cmp		%g3, 0
+	rd		%asi, %g3
+	be,a,pt		%icc, 1f
+	 wr		%g0, ASI_BLK_P, %asi
+	wr		%g0, ASI_BLK_COMMIT_P, %asi
+1:	ldda		[%o1] ASI_BLK_P, %f0
+	add		%o1, 0x40, %o1
+	ldda		[%o1] ASI_BLK_P, %f16
+	add		%o1, 0x40, %o1
+	sethi		%hi(PAGE_SIZE), %o2
+1:	TOUCH(f0, f2, f4, f6, f8, f10, f12, f14)
+	ldda		[%o1] ASI_BLK_P, %f32
+	stda		%f48, [%o0] %asi
+	add		%o1, 0x40, %o1
+	sub		%o2, 0x40, %o2
+	add		%o0, 0x40, %o0
+	TOUCH(f16, f18, f20, f22, f24, f26, f28, f30)
+	ldda		[%o1] ASI_BLK_P, %f0
+	stda		%f48, [%o0] %asi
+	add		%o1, 0x40, %o1
+	sub		%o2, 0x40, %o2
+	add		%o0, 0x40, %o0
+	TOUCH(f32, f34, f36, f38, f40, f42, f44, f46)
+	ldda		[%o1] ASI_BLK_P, %f16
+	stda		%f48, [%o0] %asi
+	sub		%o2, 0x40, %o2
+	add		%o1, 0x40, %o1
+	cmp		%o2, PAGE_SIZE_REM
+	bne,pt		%xcc, 1b
+	 add		%o0, 0x40, %o0
+#if (PAGE_SHIFT == 16) || (PAGE_SHIFT == 22)
+	TOUCH(f0, f2, f4, f6, f8, f10, f12, f14)
+	ldda		[%o1] ASI_BLK_P, %f32
+	stda		%f48, [%o0] %asi
+	add		%o1, 0x40, %o1
+	sub		%o2, 0x40, %o2
+	add		%o0, 0x40, %o0
+	TOUCH(f16, f18, f20, f22, f24, f26, f28, f30)
+	ldda		[%o1] ASI_BLK_P, %f0
+	stda		%f48, [%o0] %asi
+	add		%o1, 0x40, %o1
+	sub		%o2, 0x40, %o2
+	add		%o0, 0x40, %o0
+	membar		#Sync
+	stda		%f32, [%o0] %asi
+	add		%o0, 0x40, %o0
+	stda		%f0, [%o0] %asi
+#else
+	membar		#Sync
+	stda		%f0, [%o0] %asi
+	add		%o0, 0x40, %o0
+	stda		%f16, [%o0] %asi
+#endif
+	membar		#Sync
+	wr		%g3, 0x0, %asi
+	VISExit
+
+5:
+	stxa		%g0, [%g1] ASI_DMMU_DEMAP
+	membar		#Sync
+
+	sethi		%hi(DCACHE_SIZE), %g2
+	stxa		%g0, [%g1 + %g2] ASI_DMMU_DEMAP
+	membar		#Sync
+
+	retl
+	 stw		%o4, [%g6 + TI_PRE_COUNT]
+
+	.size		copy_user_page, .-copy_user_page
diff --git a/arch/sparc64/lib/csum_copy.S b/arch/sparc64/lib/csum_copy.S
new file mode 100644
index 0000000..71af488
--- /dev/null
+++ b/arch/sparc64/lib/csum_copy.S
@@ -0,0 +1,308 @@
+/* csum_copy.S: Checksum+copy code for sparc64
+ *
+ * Copyright (C) 2005 David S. Miller <davem@davemloft.net>
+ */
+
+#ifdef __KERNEL__
+#define GLOBAL_SPARE	%g7
+#else
+#define GLOBAL_SPARE	%g5
+#endif
+
+#ifndef EX_LD
+#define EX_LD(x)	x
+#endif
+
+#ifndef EX_ST
+#define EX_ST(x)	x
+#endif
+
+#ifndef EX_RETVAL
+#define EX_RETVAL(x)	x
+#endif
+
+#ifndef LOAD
+#define LOAD(type,addr,dest)	type [addr], dest
+#endif
+
+#ifndef STORE
+#define STORE(type,src,addr)	type src, [addr]
+#endif
+
+#ifndef FUNC_NAME
+#define FUNC_NAME	csum_partial_copy_nocheck
+#endif
+
+	.register	%g2, #scratch
+	.register	%g3, #scratch
+
+	.text
+
+90:
+	/* We checked for zero length already, so there must be
+	 * at least one byte.
+	 */
+	be,pt		%icc, 1f
+	 nop
+	EX_LD(LOAD(ldub, %o0 + 0x00, %o4))
+	add		%o0, 1, %o0
+	sub		%o2, 1, %o2
+	EX_ST(STORE(stb, %o4, %o1 + 0x00))
+	add		%o1, 1, %o1
+1:	andcc		%o0, 0x2, %g0
+	be,pn		%icc, 80f
+	 cmp		%o2, 2
+	blu,pn		%icc, 60f
+	 nop
+	EX_LD(LOAD(lduh, %o0 + 0x00, %o5))
+	add		%o0, 2, %o0
+	sub		%o2, 2, %o2
+	EX_ST(STORE(sth, %o5, %o1 + 0x00))
+	add		%o1, 2, %o1
+	ba,pt		%xcc, 80f
+	 add		%o5, %o4, %o4
+
+	.globl		FUNC_NAME
+FUNC_NAME:		/* %o0=src, %o1=dst, %o2=len, %o3=sum */
+	LOAD(prefetch, %o0 + 0x000, #n_reads)
+	xor		%o0, %o1, %g1
+	clr		%o4
+	andcc		%g1, 0x3, %g0
+	bne,pn		%icc, 95f
+	 LOAD(prefetch, %o0 + 0x040, #n_reads)
+	
+	brz,pn		%o2, 70f
+	 andcc		%o0, 0x3, %g0
+
+	/* We "remember" whether the lowest bit in the address
+	 * was set in GLOBAL_SPARE.  Because if it is, we have to swap
+	 * upper and lower 8 bit fields of the sum we calculate.
+	*/
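+	/* In C terms the byte-swap fixup near the end of this function
+	 * is roughly:
+	 *
+	 *	sum = (sum >> 8) | ((sum & 0xff) << 8);
+	 *
+	 * The 16-bit ones-complement sum is byte-order independent, so
+	 * starting one byte early just leaves its two halves swapped.
+	 */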
+	bne,pn		%icc, 90b
+	 andcc		%o0, 0x1, GLOBAL_SPARE
+
+80:
+	LOAD(prefetch, %o0 + 0x080, #n_reads)
+	andncc		%o2, 0x3f, %g3
+
+	LOAD(prefetch, %o0 + 0x0c0, #n_reads)
+	sub		%o2, %g3, %o2
+	brz,pn		%g3, 2f
+	 LOAD(prefetch, %o0 + 0x100, #n_reads)
+
+	/* So that we don't need to use the non-pairing
+	 * add-with-carry instructions we accumulate 32-bit
+	 * values into a 64-bit register.  At the end of the
+	 * loop we fold it down to 32-bits and so on.
+	 */
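+	/* I.e., conceptually (a sketch, "sum" standing in for %o4):
+	 *
+	 *	u64 sum = 0;
+	 *	for (each 32-bit word w in the 64-byte block)
+	 *		sum += w;
+	 *
+	 * The 64-bit register absorbs every carry; the fold-down steps
+	 * after the loop recover them.
+	 */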
+	ba,pt		%xcc, 1f
+	LOAD(prefetch, %o0 + 0x140, #n_reads)
+
+	.align		32
+1:	EX_LD(LOAD(lduw, %o0 + 0x00, %o5))
+	EX_LD(LOAD(lduw, %o0 + 0x04, %g1))
+	EX_LD(LOAD(lduw, %o0 + 0x08, %g2))
+	add		%o4, %o5, %o4
+	EX_ST(STORE(stw, %o5, %o1 + 0x00))
+	EX_LD(LOAD(lduw, %o0 + 0x0c, %o5))
+	add		%o4, %g1, %o4
+	EX_ST(STORE(stw, %g1, %o1 + 0x04))
+	EX_LD(LOAD(lduw, %o0 + 0x10, %g1))
+	add		%o4, %g2, %o4
+	EX_ST(STORE(stw, %g2, %o1 + 0x08))
+	EX_LD(LOAD(lduw, %o0 + 0x14, %g2))
+	add		%o4, %o5, %o4
+	EX_ST(STORE(stw, %o5, %o1 + 0x0c))
+	EX_LD(LOAD(lduw, %o0 + 0x18, %o5))
+	add		%o4, %g1, %o4
+	EX_ST(STORE(stw, %g1, %o1 + 0x10))
+	EX_LD(LOAD(lduw, %o0 + 0x1c, %g1))
+	add		%o4, %g2, %o4
+	EX_ST(STORE(stw, %g2, %o1 + 0x14))
+	EX_LD(LOAD(lduw, %o0 + 0x20, %g2))
+	add		%o4, %o5, %o4
+	EX_ST(STORE(stw, %o5, %o1 + 0x18))
+	EX_LD(LOAD(lduw, %o0 + 0x24, %o5))
+	add		%o4, %g1, %o4
+	EX_ST(STORE(stw, %g1, %o1 + 0x1c))
+	EX_LD(LOAD(lduw, %o0 + 0x28, %g1))
+	add		%o4, %g2, %o4
+	EX_ST(STORE(stw, %g2, %o1 + 0x20))
+	EX_LD(LOAD(lduw, %o0 + 0x2c, %g2))
+	add		%o4, %o5, %o4
+	EX_ST(STORE(stw, %o5, %o1 + 0x24))
+	EX_LD(LOAD(lduw, %o0 + 0x30, %o5))
+	add		%o4, %g1, %o4
+	EX_ST(STORE(stw, %g1, %o1 + 0x28))
+	EX_LD(LOAD(lduw, %o0 + 0x34, %g1))
+	add		%o4, %g2, %o4
+	EX_ST(STORE(stw, %g2, %o1 + 0x2c))
+	EX_LD(LOAD(lduw, %o0 + 0x38, %g2))
+	add		%o4, %o5, %o4
+	EX_ST(STORE(stw, %o5, %o1 + 0x30))
+	EX_LD(LOAD(lduw, %o0 + 0x3c, %o5))
+	add		%o4, %g1, %o4
+	EX_ST(STORE(stw, %g1, %o1 + 0x34))
+	LOAD(prefetch, %o0 + 0x180, #n_reads)
+	add		%o4, %g2, %o4
+	EX_ST(STORE(stw, %g2, %o1 + 0x38))
+	subcc		%g3, 0x40, %g3
+	add		%o0, 0x40, %o0
+	add		%o4, %o5, %o4
+	EX_ST(STORE(stw, %o5, %o1 + 0x3c))
+	bne,pt		%icc, 1b
+	 add		%o1, 0x40, %o1
+
+2:	and		%o2, 0x3c, %g3
+	brz,pn		%g3, 2f
+	 sub		%o2, %g3, %o2
+1:	EX_LD(LOAD(lduw, %o0 + 0x00, %o5))
+	subcc		%g3, 0x4, %g3
+	add		%o0, 0x4, %o0
+	add		%o4, %o5, %o4
+	EX_ST(STORE(stw, %o5, %o1 + 0x00))
+	bne,pt		%icc, 1b
+	 add		%o1, 0x4, %o1
+
+2:
+	/* fold 64-->32 */
+	srlx		%o4, 32, %o5
+	srl		%o4, 0, %o4
+	add		%o4, %o5, %o4
+	srlx		%o4, 32, %o5
+	srl		%o4, 0, %o4
+	add		%o4, %o5, %o4
+
+	/* fold 32-->16 */
+	sethi		%hi(0xffff0000), %g1
+	srl		%o4, 16, %o5
+	andn		%o4, %g1, %g2
+	add		%o5, %g2, %o4
+	srl		%o4, 16, %o5
+	andn		%o4, %g1, %g2
+	add		%o5, %g2, %o4
+
+60:
+	/* %o4 has the 16-bit sum we have calculated so far.  */
+	cmp		%o2, 2
+	blu,pt		%icc, 1f
+	 nop
+	EX_LD(LOAD(lduh, %o0 + 0x00, %o5))
+	sub		%o2, 2, %o2
+	add		%o0, 2, %o0
+	add		%o4, %o5, %o4
+	EX_ST(STORE(sth, %o5, %o1 + 0x00))
+	add		%o1, 0x2, %o1
+1:	brz,pt		%o2, 1f
+	 nop
+	EX_LD(LOAD(ldub, %o0 + 0x00, %o5))
+	sub		%o2, 1, %o2
+	add		%o0, 1, %o0
+	EX_ST(STORE(stb, %o5, %o1 + 0x00))
+	sllx		%o5, 8, %o5
+	add		%o1, 1, %o1
+	add		%o4, %o5, %o4
+1:
+	/* fold 32-->16 */
+	sethi		%hi(0xffff0000), %g1
+	srl		%o4, 16, %o5
+	andn		%o4, %g1, %g2
+	add		%o5, %g2, %o4
+	srl		%o4, 16, %o5
+	andn		%o4, %g1, %g2
+	add		%o5, %g2, %o4
+
+1:	brz,pt		GLOBAL_SPARE, 1f
+	 nop
+
+	/* We started with an odd byte, byte-swap the result.  */
+	srl		%o4, 8, %o5
+	and		%o4, 0xff, %g1
+	sll		%g1, 8, %g1
+	or		%o5, %g1, %o4
+
+1:	add		%o3, %o4, %o3
+
+70:
+	retl
+	 mov		%o3, %o0
+
+95:	mov		0, GLOBAL_SPARE
+	brlez,pn	%o2, 4f
+	 andcc		%o0, 1, %o5		
+	be,a,pt		%icc, 1f
+	 srl		%o2, 1, %g1		
+	sub		%o2, 1, %o2	
+	EX_LD(LOAD(ldub, %o0, GLOBAL_SPARE))
+	add		%o0, 1, %o0	
+	EX_ST(STORE(stb, GLOBAL_SPARE, %o1))
+	srl		%o2, 1, %g1
+	add		%o1, 1, %o1
+1:	brz,a,pn	%g1, 3f
+	 andcc		%o2, 1, %g0
+	andcc		%o0, 2, %g0	
+	be,a,pt		%icc, 1f
+	 srl		%g1, 1, %g1
+	EX_LD(LOAD(lduh, %o0, %o4))
+	sub		%o2, 2, %o2	
+	srl		%o4, 8, %g2
+	sub		%g1, 1, %g1	
+	EX_ST(STORE(stb, %g2, %o1))
+	add		%o4, GLOBAL_SPARE, GLOBAL_SPARE
+	EX_ST(STORE(stb, %o4, %o1 + 1))
+	add		%o0, 2, %o0	
+	srl		%g1, 1, %g1
+	add		%o1, 2, %o1
+1:	brz,a,pn	%g1, 2f		
+	 andcc		%o2, 2, %g0
+	EX_LD(LOAD(lduw, %o0, %o4))
+5:	srl		%o4, 24, %g2
+	srl		%o4, 16, %g3
+	EX_ST(STORE(stb, %g2, %o1))
+	srl		%o4, 8, %g2
+	EX_ST(STORE(stb, %g3, %o1 + 1))
+	add		%o0, 4, %o0
+	EX_ST(STORE(stb, %g2, %o1 + 2))
+	addcc		%o4, GLOBAL_SPARE, GLOBAL_SPARE
+	EX_ST(STORE(stb, %o4, %o1 + 3))
+	addc		GLOBAL_SPARE, %g0, GLOBAL_SPARE
+	add		%o1, 4, %o1
+	subcc		%g1, 1, %g1
+	bne,a,pt	%icc, 5b
+	 EX_LD(LOAD(lduw, %o0, %o4))
+	sll		GLOBAL_SPARE, 16, %g2
+	srl		GLOBAL_SPARE, 16, GLOBAL_SPARE
+	srl		%g2, 16, %g2
+	andcc		%o2, 2, %g0
+	add		%g2, GLOBAL_SPARE, GLOBAL_SPARE 
+2:	be,a,pt		%icc, 3f		
+	 andcc		%o2, 1, %g0
+	EX_LD(LOAD(lduh, %o0, %o4))
+	andcc		%o2, 1, %g0
+	srl		%o4, 8, %g2
+	add		%o0, 2, %o0	
+	EX_ST(STORE(stb, %g2, %o1))
+	add		GLOBAL_SPARE, %o4, GLOBAL_SPARE
+	EX_ST(STORE(stb, %o4, %o1 + 1))
+	add		%o1, 2, %o1
+3:	be,a,pt		%icc, 1f		
+	 sll		GLOBAL_SPARE, 16, %o4
+	EX_LD(LOAD(ldub, %o0, %g2))
+	sll		%g2, 8, %o4	
+	EX_ST(STORE(stb, %g2, %o1))
+	add		GLOBAL_SPARE, %o4, GLOBAL_SPARE
+	sll		GLOBAL_SPARE, 16, %o4
+1:	addcc		%o4, GLOBAL_SPARE, GLOBAL_SPARE
+	srl		GLOBAL_SPARE, 16, %o4
+	addc		%g0, %o4, GLOBAL_SPARE
+	brz,pt		%o5, 4f
+	 srl		GLOBAL_SPARE, 8, %o4
+	and		GLOBAL_SPARE, 0xff, %g2
+	and		%o4, 0xff, %o4
+	sll		%g2, 8, %g2
+	or		%g2, %o4, GLOBAL_SPARE
+4:	addcc		%o3, GLOBAL_SPARE, %o3
+	addc		%g0, %o3, %o0
+	retl
+	 srl		%o0, 0, %o0
+	.size		FUNC_NAME, .-FUNC_NAME
diff --git a/arch/sparc64/lib/csum_copy_from_user.S b/arch/sparc64/lib/csum_copy_from_user.S
new file mode 100644
index 0000000..817ebda
--- /dev/null
+++ b/arch/sparc64/lib/csum_copy_from_user.S
@@ -0,0 +1,21 @@
+/* csum_copy_from_user.S: Checksum+copy from userspace.
+ *
+ * Copyright (C) 2005 David S. Miller (davem@davemloft.net)
+ */
+
+#define EX_LD(x)		\
+98:	x;			\
+	.section .fixup;	\
+	.align 4;		\
+99:	retl;			\
+	 mov	-1, %o0;	\
+	.section __ex_table;	\
+	.align 4;		\
+	.word 98b, 99b;		\
+	.text;			\
+	.align 4;
+
+#define FUNC_NAME		__csum_partial_copy_from_user
+#define LOAD(type,addr,dest)	type##a [addr] %asi, dest
+
+#include "csum_copy.S"
diff --git a/arch/sparc64/lib/csum_copy_to_user.S b/arch/sparc64/lib/csum_copy_to_user.S
new file mode 100644
index 0000000..c2f9463
--- /dev/null
+++ b/arch/sparc64/lib/csum_copy_to_user.S
@@ -0,0 +1,21 @@
+/* csum_copy_to_user.S: Checksum+copy to userspace.
+ *
+ * Copyright (C) 2005 David S. Miller (davem@davemloft.net)
+ */
+
+#define EX_ST(x)		\
+98:	x;			\
+	.section .fixup;	\
+	.align 4;		\
+99:	retl;			\
+	 mov	-1, %o0;	\
+	.section __ex_table;	\
+	.align 4;		\
+	.word 98b, 99b;		\
+	.text;			\
+	.align 4;
+
+#define FUNC_NAME		__csum_partial_copy_to_user
+#define STORE(type,src,addr)	type##a src, [addr] %asi
+
+#include "csum_copy.S"
diff --git a/arch/sparc64/lib/debuglocks.c b/arch/sparc64/lib/debuglocks.c
new file mode 100644
index 0000000..c421e0c
--- /dev/null
+++ b/arch/sparc64/lib/debuglocks.c
@@ -0,0 +1,376 @@
+/* $Id: debuglocks.c,v 1.9 2001/11/17 00:10:48 davem Exp $
+ * debuglocks.c: Debugging versions of SMP locking primitives.
+ *
+ * Copyright (C) 1998 David S. Miller (davem@redhat.com)
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <asm/system.h>
+
+#ifdef CONFIG_SMP
+
+#define GET_CALLER(PC) __asm__ __volatile__("mov %%i7, %0" : "=r" (PC))
+
+static inline void show (char *str, spinlock_t *lock, unsigned long caller)
+{
+	int cpu = smp_processor_id();
+
+	printk("%s(%p) CPU#%d stuck at %08x, owner PC(%08x):CPU(%x)\n",
+	       str, lock, cpu, (unsigned int) caller,
+	       lock->owner_pc, lock->owner_cpu);
+}
+
+static inline void show_read (char *str, rwlock_t *lock, unsigned long caller)
+{
+	int cpu = smp_processor_id();
+
+	printk("%s(%p) CPU#%d stuck at %08x, writer PC(%08x):CPU(%x)\n",
+	       str, lock, cpu, (unsigned int) caller,
+	       lock->writer_pc, lock->writer_cpu);
+}
+
+static inline void show_write (char *str, rwlock_t *lock, unsigned long caller)
+{
+	int cpu = smp_processor_id();
+	int i;
+
+	printk("%s(%p) CPU#%d stuck at %08x\n",
+	       str, lock, cpu, (unsigned int) caller);
+	printk("Writer: PC(%08x):CPU(%x)\n",
+	       lock->writer_pc, lock->writer_cpu);
+	printk("Readers:");
+	for (i = 0; i < NR_CPUS; i++)
+		if (lock->reader_pc[i])
+			printk(" %d[%08x]", i, lock->reader_pc[i]);
+	printk("\n");
+}
+
+#undef INIT_STUCK
+#define INIT_STUCK 100000000
+
+void _do_spin_lock(spinlock_t *lock, char *str)
+{
+	unsigned long caller, val;
+	int stuck = INIT_STUCK;
+	int cpu = get_cpu();
+	int shown = 0;
+
+	GET_CALLER(caller);
+again:
+	__asm__ __volatile__("ldstub [%1], %0"
+			     : "=r" (val)
+			     : "r" (&(lock->lock))
+			     : "memory");
+	membar("#StoreLoad | #StoreStore");
+	if (val) {
+		while (lock->lock) {
+			if (!--stuck) {
+				if (shown++ <= 2)
+					show(str, lock, caller);
+				stuck = INIT_STUCK;
+			}
+			membar("#LoadLoad");
+		}
+		goto again;
+	}
+	lock->owner_pc = ((unsigned int)caller);
+	lock->owner_cpu = cpu;
+	current->thread.smp_lock_count++;
+	current->thread.smp_lock_pc = ((unsigned int)caller);
+
+	put_cpu();
+}
+
+int _do_spin_trylock(spinlock_t *lock)
+{
+	unsigned long val, caller;
+	int cpu = get_cpu();
+
+	GET_CALLER(caller);
+	__asm__ __volatile__("ldstub [%1], %0"
+			     : "=r" (val)
+			     : "r" (&(lock->lock))
+			     : "memory");
+	membar("#StoreLoad | #StoreStore");
+	if (!val) {
+		lock->owner_pc = ((unsigned int)caller);
+		lock->owner_cpu = cpu;
+		current->thread.smp_lock_count++;
+		current->thread.smp_lock_pc = ((unsigned int)caller);
+	}
+
+	put_cpu();
+
+	return val == 0;
+}
+
+void _do_spin_unlock(spinlock_t *lock)
+{
+	lock->owner_pc = 0;
+	lock->owner_cpu = NO_PROC_ID;
+	membar("#StoreStore | #LoadStore");
+	lock->lock = 0;
+	current->thread.smp_lock_count--;
+}
+
+/* Keep INIT_STUCK the same... */
+
+void _do_read_lock (rwlock_t *rw, char *str)
+{
+	unsigned long caller, val;
+	int stuck = INIT_STUCK;
+	int cpu = get_cpu();
+	int shown = 0;
+
+	GET_CALLER(caller);
+wlock_again:
+	/* Wait for any writer to go away.  */
+	while (((long)(rw->lock)) < 0) {
+		if (!--stuck) {
+			if (shown++ <= 2)
+				show_read(str, rw, caller);
+			stuck = INIT_STUCK;
+		}
+		membar("#LoadLoad");
+	}
+	/* Try once to increment the counter.  */
+	__asm__ __volatile__(
+"	ldx		[%0], %%g1\n"
+"	brlz,a,pn	%%g1, 2f\n"
+"	 mov		1, %0\n"
+"	add		%%g1, 1, %%g7\n"
+"	casx		[%0], %%g1, %%g7\n"
+"	sub		%%g1, %%g7, %0\n"
+"2:"	: "=r" (val)
+	: "0" (&(rw->lock))
+	: "g1", "g7", "memory");
+	membar("#StoreLoad | #StoreStore");
+	if (val)
+		goto wlock_again;
+	rw->reader_pc[cpu] = ((unsigned int)caller);
+	current->thread.smp_lock_count++;
+	current->thread.smp_lock_pc = ((unsigned int)caller);
+
+	put_cpu();
+}
+
+void _do_read_unlock (rwlock_t *rw, char *str)
+{
+	unsigned long caller, val;
+	int stuck = INIT_STUCK;
+	int cpu = get_cpu();
+	int shown = 0;
+
+	GET_CALLER(caller);
+
+	/* Drop our identity _first_. */
+	rw->reader_pc[cpu] = 0;
+	current->thread.smp_lock_count--;
+runlock_again:
+	/* Spin trying to decrement the counter using casx.  */
+	__asm__ __volatile__(
+"	membar	#StoreLoad | #LoadLoad\n"
+"	ldx	[%0], %%g1\n"
+"	sub	%%g1, 1, %%g7\n"
+"	casx	[%0], %%g1, %%g7\n"
+"	membar	#StoreLoad | #StoreStore\n"
+"	sub	%%g1, %%g7, %0\n"
+	: "=r" (val)
+	: "0" (&(rw->lock))
+	: "g1", "g7", "memory");
+	if (val) {
+		if (!--stuck) {
+			if (shown++ <= 2)
+				show_read(str, rw, caller);
+			stuck = INIT_STUCK;
+		}
+		goto runlock_again;
+	}
+
+	put_cpu();
+}
+
+void _do_write_lock (rwlock_t *rw, char *str)
+{
+	unsigned long caller, val;
+	int stuck = INIT_STUCK;
+	int cpu = get_cpu();
+	int shown = 0;
+
+	GET_CALLER(caller);
+wlock_again:
+	/* Spin while there is another writer. */
+	while (((long)rw->lock) < 0) {
+		if (!--stuck) {
+			if (shown++ <= 2)
+				show_write(str, rw, caller);
+			stuck = INIT_STUCK;
+		}
+		membar("#LoadLoad");
+	}
+
+	/* Try to acquire the write bit.  */
+	__asm__ __volatile__(
+"	mov	1, %%g3\n"
+"	sllx	%%g3, 63, %%g3\n"
+"	ldx	[%0], %%g1\n"
+"	brlz,pn	%%g1, 1f\n"
+"	 or	%%g1, %%g3, %%g7\n"
+"	casx	[%0], %%g1, %%g7\n"
+"	membar	#StoreLoad | #StoreStore\n"
+"	ba,pt	%%xcc, 2f\n"
+"	 sub	%%g1, %%g7, %0\n"
+"1:	mov	1, %0\n"
+"2:"	: "=r" (val)
+	: "0" (&(rw->lock))
+	: "g3", "g1", "g7", "memory");
+	if (val) {
+		/* We couldn't get the write bit. */
+		if (!--stuck) {
+			if (shown++ <= 2)
+				show_write(str, rw, caller);
+			stuck = INIT_STUCK;
+		}
+		goto wlock_again;
+	}
+	if ((rw->lock & ((1UL<<63)-1UL)) != 0UL) {
+		/* Readers still around, drop the write
+		 * lock, spin, and try again.
+		 */
+		if (!--stuck) {
+			if (shown++ <= 2)
+				show_write(str, rw, caller);
+			stuck = INIT_STUCK;
+		}
+		__asm__ __volatile__(
+"		mov	1, %%g3\n"
+"		sllx	%%g3, 63, %%g3\n"
+"1:		ldx	[%0], %%g1\n"
+"		andn	%%g1, %%g3, %%g7\n"
+"		casx	[%0], %%g1, %%g7\n"
+"		cmp	%%g1, %%g7\n"
+"		bne,pn	%%xcc, 1b\n"
+"		 membar	#StoreLoad | #StoreStore"
+		: /* no outputs */
+		: "r" (&(rw->lock))
+		: "g3", "g1", "g7", "cc", "memory");
+		while(rw->lock != 0) {
+			if (!--stuck) {
+				if (shown++ <= 2)
+					show_write(str, rw, caller);
+				stuck = INIT_STUCK;
+			}
+			membar("#LoadLoad");
+		}
+		goto wlock_again;
+	}
+
+	/* We have it, say who we are. */
+	rw->writer_pc = ((unsigned int)caller);
+	rw->writer_cpu = cpu;
+	current->thread.smp_lock_count++;
+	current->thread.smp_lock_pc = ((unsigned int)caller);
+
+	put_cpu();
+}
+
+void _do_write_unlock(rwlock_t *rw)
+{
+	unsigned long caller, val;
+	int stuck = INIT_STUCK;
+	int shown = 0;
+
+	GET_CALLER(caller);
+
+	/* Drop our identity _first_ */
+	rw->writer_pc = 0;
+	rw->writer_cpu = NO_PROC_ID;
+	current->thread.smp_lock_count--;
+wlock_again:
+	__asm__ __volatile__(
+"	membar	#StoreLoad | #LoadLoad\n"
+"	mov	1, %%g3\n"
+"	sllx	%%g3, 63, %%g3\n"
+"	ldx	[%0], %%g1\n"
+"	andn	%%g1, %%g3, %%g7\n"
+"	casx	[%0], %%g1, %%g7\n"
+"	membar	#StoreLoad | #StoreStore\n"
+"	sub	%%g1, %%g7, %0\n"
+	: "=r" (val)
+	: "0" (&(rw->lock))
+	: "g3", "g1", "g7", "memory");
+	if (val) {
+		if (!--stuck) {
+			if (shown++ <= 2)
+				show_write("write_unlock", rw, caller);
+			stuck = INIT_STUCK;
+		}
+		goto wlock_again;
+	}
+}
+
+int _do_write_trylock (rwlock_t *rw, char *str)
+{
+	unsigned long caller, val;
+	int cpu = get_cpu();
+
+	GET_CALLER(caller);
+
+	/* Try to acquire the write bit.  */
+	__asm__ __volatile__(
+"	mov	1, %%g3\n"
+"	sllx	%%g3, 63, %%g3\n"
+"	ldx	[%0], %%g1\n"
+"	brlz,pn	%%g1, 1f\n"
+"	 or	%%g1, %%g3, %%g7\n"
+"	casx	[%0], %%g1, %%g7\n"
+"	membar	#StoreLoad | #StoreStore\n"
+"	ba,pt	%%xcc, 2f\n"
+"	 sub	%%g1, %%g7, %0\n"
+"1:	mov	1, %0\n"
+"2:"	: "=r" (val)
+	: "0" (&(rw->lock))
+	: "g3", "g1", "g7", "memory");
+
+	if (val) {
+		put_cpu();
+		return 0;
+	}
+
+	if ((rw->lock & ((1UL<<63)-1UL)) != 0UL) {
+		/* Readers still around, drop the write
+		 * lock, return failure.
+		 */
+		__asm__ __volatile__(
+"		mov	1, %%g3\n"
+"		sllx	%%g3, 63, %%g3\n"
+"1:		ldx	[%0], %%g1\n"
+"		andn	%%g1, %%g3, %%g7\n"
+"		casx	[%0], %%g1, %%g7\n"
+"		cmp	%%g1, %%g7\n"
+"		bne,pn	%%xcc, 1b\n"
+"		 membar	#StoreLoad | #StoreStore"
+		: /* no outputs */
+		: "r" (&(rw->lock))
+		: "g3", "g1", "g7", "cc", "memory");
+
+		put_cpu();
+
+		return 0;
+	}
+
+	/* We have it, say who we are. */
+	rw->writer_pc = ((unsigned int)caller);
+	rw->writer_cpu = cpu;
+	current->thread.smp_lock_count++;
+	current->thread.smp_lock_pc = ((unsigned int)caller);
+
+	put_cpu();
+
+	return 1;
+}
+
+#endif /* CONFIG_SMP */
diff --git a/arch/sparc64/lib/dec_and_lock.S b/arch/sparc64/lib/dec_and_lock.S
new file mode 100644
index 0000000..7e6fdae
--- /dev/null
+++ b/arch/sparc64/lib/dec_and_lock.S
@@ -0,0 +1,78 @@
+/* $Id: dec_and_lock.S,v 1.5 2001/11/18 00:12:56 davem Exp $
+ * dec_and_lock.S: Sparc64 version of "atomic_dec_and_lock()"
+ *                 using cas and ldstub instructions.
+ *
+ * Copyright (C) 2000 David S. Miller (davem@redhat.com)
+ */
+#include <linux/config.h>
+#include <asm/thread_info.h>
+
+	.text
+	.align	64
+
+	/* CAS basically works like this:
+	 *
+	 * void CAS(MEM, REG1, REG2)
+	 * {
+	 *   START_ATOMIC();
+	 *   if (*(MEM) == REG1) {
+	 *     TMP = *(MEM);
+	 *     *(MEM) = REG2;
+	 *     REG2 = TMP;
+	 *   } else
+	 *     REG2 = *(MEM);
+	 *   END_ATOMIC();
+	 * }
+	 */
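+	/* Behaviourally, _atomic_dec_and_lock() below amounts to (a sketch):
+	 *
+	 *	decrement the counter at %o0;
+	 *	if it dropped to zero
+	 *		return 1 with the spinlock at %o1 held;
+	 *	else
+	 *		return 0 without touching the lock;
+	 *
+	 * with the 1->0 transition only ever performed while holding the
+	 * lock, re-validated with cas after ldstub acquires it.
+	 */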
+
+	.globl	_atomic_dec_and_lock
+_atomic_dec_and_lock:	/* %o0 = counter, %o1 = lock */
+loop1:	lduw	[%o0], %g2
+	subcc	%g2, 1, %g7
+	be,pn	%icc, start_to_zero
+	 nop
+nzero:	cas	[%o0], %g2, %g7
+	cmp	%g2, %g7
+	bne,pn	%icc, loop1
+	 mov	0, %g1
+
+out:
+	membar	#StoreLoad | #StoreStore
+	retl
+	 mov	%g1, %o0
+start_to_zero:
+#ifdef CONFIG_PREEMPT
+	ldsw	[%g6 + TI_PRE_COUNT], %g3
+	add	%g3, 1, %g3
+	stw	%g3, [%g6 + TI_PRE_COUNT]
+#endif
+to_zero:
+	ldstub	[%o1], %g3
+	brnz,pn	%g3, spin_on_lock
+	 membar	#StoreLoad | #StoreStore
+loop2:	cas	[%o0], %g2, %g7		/* ASSERT(g7 == 0) */
+	cmp	%g2, %g7
+
+	be,pt	%icc, out
+	 mov	1, %g1
+	lduw	[%o0], %g2
+	subcc	%g2, 1, %g7
+	be,pn	%icc, loop2
+	 nop
+	membar	#StoreStore | #LoadStore
+	stb	%g0, [%o1]
+#ifdef CONFIG_PREEMPT
+	ldsw	[%g6 + TI_PRE_COUNT], %g3
+	sub	%g3, 1, %g3
+	stw	%g3, [%g6 + TI_PRE_COUNT]
+#endif
+
+	b,pt	%xcc, nzero
+	 nop
+spin_on_lock:
+	ldub	[%o1], %g3
+	brnz,pt	%g3, spin_on_lock
+	 membar	#LoadLoad
+	ba,pt	%xcc, to_zero
+	 nop
+	nop
diff --git a/arch/sparc64/lib/delay.c b/arch/sparc64/lib/delay.c
new file mode 100644
index 0000000..f6b4c78
--- /dev/null
+++ b/arch/sparc64/lib/delay.c
@@ -0,0 +1,49 @@
+/* delay.c: Delay loops for sparc64
+ *
+ * Copyright (C) 2004 David S. Miller <davem@redhat.com>
+ *
+ * Based heavily upon x86 variant which is:
+ *	Copyright (C) 1993 Linus Torvalds
+ *	Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
+ */
+
+#include <linux/delay.h>
+
+void __delay(unsigned long loops)
+{
+	__asm__ __volatile__(
+"	b,pt	%%xcc, 1f\n"
+"	 cmp	%0, 0\n"
+"	.align	32\n"
+"1:\n"
+"	bne,pt	%%xcc, 1b\n"
+"	 subcc	%0, 1, %0\n"
+	: "=&r" (loops)
+	: "0" (loops)
+	: "cc");
+}
+
+/* We used to multiply by HZ after shifting down by 32 bits
+ * but that runs into problems for higher values of HZ and
+ * slow cpus.
+ */
+void __const_udelay(unsigned long n)
+{
+	n *= 4;
+
+	n *= (cpu_data(_smp_processor_id()).udelay_val * (HZ/4));
+	n >>= 32;
+
+	__delay(n + 1);
+}
+
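+/* The multipliers used by __udelay() and __ndelay() below fold the unit
+ * conversion into __const_udelay()'s 32-bit fixed-point math: 0x10c7 is
+ * roughly 2^32 / 10^6 and 0x5 is roughly 2^32 / 10^9, both rounded up
+ * (presumably so that the delay errs on the long side).
+ */
+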
+void __udelay(unsigned long n)
+{
+	__const_udelay(n * 0x10c7UL);
+}
+
+
+void __ndelay(unsigned long n)
+{
+	__const_udelay(n * 0x5UL);
+}
diff --git a/arch/sparc64/lib/find_bit.c b/arch/sparc64/lib/find_bit.c
new file mode 100644
index 0000000..6059557
--- /dev/null
+++ b/arch/sparc64/lib/find_bit.c
@@ -0,0 +1,127 @@
+#include <linux/bitops.h>
+
+/**
+ * find_next_bit - find the next set bit in a memory region
+ * @addr: The address to base the search on
+ * @size: The bitmap size in bits
+ * @offset: The bit number to start searching at
+ */
+unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
+				unsigned long offset)
+{
+	const unsigned long *p = addr + (offset >> 6);
+	unsigned long result = offset & ~63UL;
+	unsigned long tmp;
+
+	if (offset >= size)
+		return size;
+	size -= result;
+	offset &= 63UL;
+	if (offset) {
+		tmp = *(p++);
+		tmp &= (~0UL << offset);
+		if (size < 64)
+			goto found_first;
+		if (tmp)
+			goto found_middle;
+		size -= 64;
+		result += 64;
+	}
+	while (size & ~63UL) {
+		if ((tmp = *(p++)))
+			goto found_middle;
+		result += 64;
+		size -= 64;
+	}
+	if (!size)
+		return result;
+	tmp = *p;
+
+found_first:
+	tmp &= (~0UL >> (64 - size));
+	if (tmp == 0UL)        /* Are any bits set? */
+		return result + size; /* Nope. */
+found_middle:
+	return result + __ffs(tmp);
+}
+
+/* find_next_zero_bit() finds the first zero bit in a bit string of length
+ * 'size' bits, starting the search at bit 'offset'. This is largely based
+ * on Linus's ALPHA routines, which are pretty portable BTW.
+ */
+
+unsigned long find_next_zero_bit(const unsigned long *addr,
+			unsigned long size, unsigned long offset)
+{
+	const unsigned long *p = addr + (offset >> 6);
+	unsigned long result = offset & ~63UL;
+	unsigned long tmp;
+
+	if (offset >= size)
+		return size;
+	size -= result;
+	offset &= 63UL;
+	if (offset) {
+		tmp = *(p++);
+		tmp |= ~0UL >> (64-offset);
+		if (size < 64)
+			goto found_first;
+		if (~tmp)
+			goto found_middle;
+		size -= 64;
+		result += 64;
+	}
+	while (size & ~63UL) {
+		if (~(tmp = *(p++)))
+			goto found_middle;
+		result += 64;
+		size -= 64;
+	}
+	if (!size)
+		return result;
+	tmp = *p;
+
+found_first:
+	tmp |= ~0UL << size;
+	if (tmp == ~0UL)        /* Are any bits zero? */
+		return result + size; /* Nope. */
+found_middle:
+	return result + ffz(tmp);
+}
+
+unsigned long find_next_zero_le_bit(unsigned long *addr, unsigned long size, unsigned long offset)
+{
+	unsigned long *p = addr + (offset >> 6);
+	unsigned long result = offset & ~63UL;
+	unsigned long tmp;
+
+	if (offset >= size)
+		return size;
+	size -= result;
+	offset &= 63UL;
+	if (offset) {
+		tmp = __swab64p(p++);
+		tmp |= (~0UL >> (64-offset));
+		if (size < 64)
+			goto found_first;
+		if (~tmp)
+			goto found_middle;
+		size -= 64;
+		result += 64;
+	}
+	while (size & ~63) {
+		if (~(tmp = __swab64p(p++)))
+			goto found_middle;
+		result += 64;
+		size -= 64;
+	}
+	if (!size)
+		return result;
+	tmp = __swab64p(p);
+found_first:
+	tmp |= (~0UL << size);
+	if (tmp == ~0UL)        /* Are any bits zero? */
+		return result + size; /* Nope. */
+found_middle:
+	return result + ffz(tmp);
+}
diff --git a/arch/sparc64/lib/iomap.c b/arch/sparc64/lib/iomap.c
new file mode 100644
index 0000000..ac556db
--- /dev/null
+++ b/arch/sparc64/lib/iomap.c
@@ -0,0 +1,48 @@
+/*
+ * Implement the sparc64 iomap interfaces
+ */
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <asm/io.h>
+
+/* Create a virtual mapping cookie for an IO port range */
+void __iomem *ioport_map(unsigned long port, unsigned int nr)
+{
+	return (void __iomem *) (unsigned long) port;
+}
+
+void ioport_unmap(void __iomem *addr)
+{
+	/* Nothing to do */
+}
+EXPORT_SYMBOL(ioport_map);
+EXPORT_SYMBOL(ioport_unmap);
+
+/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
+void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
+{
+	unsigned long start = pci_resource_start(dev, bar);
+	unsigned long len = pci_resource_len(dev, bar);
+	unsigned long flags = pci_resource_flags(dev, bar);
+
+	if (!len || !start)
+		return NULL;
+	if (maxlen && len > maxlen)
+		len = maxlen;
+	if (flags & IORESOURCE_IO)
+		return ioport_map(start, len);
+	if (flags & IORESOURCE_MEM) {
+		if (flags & IORESOURCE_CACHEABLE)
+			return ioremap(start, len);
+		return ioremap_nocache(start, len);
+	}
+	/* What? */
+	return NULL;
+}
+
+void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
+{
+	/* nothing to do */
+}
+EXPORT_SYMBOL(pci_iomap);
+EXPORT_SYMBOL(pci_iounmap);
diff --git a/arch/sparc64/lib/ipcsum.S b/arch/sparc64/lib/ipcsum.S
new file mode 100644
index 0000000..58ca5b9
--- /dev/null
+++ b/arch/sparc64/lib/ipcsum.S
@@ -0,0 +1,34 @@
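+/* ipcsum.S: IP header checksum for sparc64.
+ *
+ * ip_fast_csum(iph, ihl) below sums the %o1 32-bit words of the IP
+ * header at %o0 with end-around carry, folds the result to 16 bits and
+ * returns its ones-complement, i.e. the standard IPv4 header checksum.
+ */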
+	.text
+	.align	32
+	.globl	ip_fast_csum
+	.type	ip_fast_csum,#function
+ip_fast_csum:	/* %o0 = iph, %o1 = ihl */
+	sub	%o1, 4, %g7
+	lduw	[%o0 + 0x00], %o2
+	lduw	[%o0 + 0x04], %g2
+	lduw	[%o0 + 0x08], %g3
+	addcc	%g2, %o2, %o2
+	lduw	[%o0 + 0x0c], %g2
+	addccc	%g3, %o2, %o2
+	lduw	[%o0 + 0x10], %g3
+
+	addccc	%g2, %o2, %o2
+	addc	%o2, %g0, %o2
+1:	addcc	%g3, %o2, %o2
+	add	%o0, 4, %o0
+	addccc	%o2, %g0, %o2
+	subcc	%g7, 1, %g7
+	be,a,pt	%icc, 2f
+	 sll	%o2, 16, %g2
+
+	lduw	[%o0 + 0x10], %g3
+	ba,pt	%xcc, 1b
+	 nop
+2:	addcc	%o2, %g2, %g2
+	srl	%g2, 16, %o2
+	addc	%o2, %g0, %o2
+	xnor	%g0, %o2, %o2
+	set	0xffff, %o1
+	retl
+	 and	%o2, %o1, %o0
+	.size	ip_fast_csum, .-ip_fast_csum
diff --git a/arch/sparc64/lib/mcount.S b/arch/sparc64/lib/mcount.S
new file mode 100644
index 0000000..2ef2e26
--- /dev/null
+++ b/arch/sparc64/lib/mcount.S
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2000 Anton Blanchard (anton@linuxcare.com)
+ *
+ * This file implements mcount(), which is used to collect profiling data.
+ * This can also be tweaked for kernel stack overflow detection.
+ */
+
+#include <linux/config.h>
+#include <linux/linkage.h>
+
+#include <asm/ptrace.h>
+#include <asm/thread_info.h>
+
+/*
+ * This is the main variant and is called by C code.  GCC's -pg option
+ * automatically instruments every C function with a call to this.
+ */
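+/*
+ * For example, when a file is built with -pg, a plain function like
+ *
+ *	void foo(void) { }
+ *
+ * gets a call to _mcount (or mcount, depending on the toolchain) added
+ * to its prologue, which is why both entry points below are provided.
+ */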
+
+#ifdef CONFIG_STACK_DEBUG
+
+#define OVSTACKSIZE	4096		/* let's hope this is enough */
+
+	.data
+	.align		8
+panicstring:
+	.asciz		"Stack overflow\n"
+	.align		8
+ovstack:
+	.skip		OVSTACKSIZE
+#endif
+	.text
+	.align 32
+	.globl mcount, _mcount
+mcount:
+_mcount:
+#ifdef CONFIG_STACK_DEBUG
+	/*
+	 * Check whether %sp is dangerously low.
+	 */
+	ldub		[%g6 + TI_FPDEPTH], %g1
+	srl		%g1, 1, %g3
+	add		%g3, 1, %g3
+	sllx		%g3, 8, %g3			! each fpregs frame is 256b
+	add		%g3, 192, %g3
+	add		%g6, %g3, %g3			! where does task_struct+frame end?
+	sub		%g3, STACK_BIAS, %g3
+	cmp		%sp, %g3
+	bg,pt		%xcc, 1f
+	 sethi		%hi(panicstring), %g3
+	sethi		%hi(ovstack), %g7		! can't move to panic stack fast enough
+	 or		%g7, %lo(ovstack), %g7
+	add		%g7, OVSTACKSIZE, %g7
+	sub		%g7, STACK_BIAS, %g7
+	mov		%g7, %sp
+	call		prom_printf
+	 or		%g3, %lo(panicstring), %o0
+	call		prom_halt
+	 nop
+#endif
+1:	retl
+	 nop
diff --git a/arch/sparc64/lib/memcmp.S b/arch/sparc64/lib/memcmp.S
new file mode 100644
index 0000000..c90ad96
--- /dev/null
+++ b/arch/sparc64/lib/memcmp.S
@@ -0,0 +1,28 @@
+/* $Id: memcmp.S,v 1.3 2000/03/23 07:51:08 davem Exp $
+ * Sparc64 optimized memcmp code.
+ *
+ * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ * Copyright (C) 2000 David S. Miller (davem@redhat.com)
+ */
+
+	.text
+	.align	32
+	.globl	__memcmp, memcmp
+__memcmp:
+memcmp:
+	cmp	%o2, 0		! IEU1	Group
+loop:	be,pn	%icc, ret_0	! CTI
+	 nop			! IEU0
+	ldub	[%o0], %g7	! LSU	Group
+	ldub	[%o1], %g3	! LSU	Group
+	sub	%o2, 1, %o2	! IEU0
+	add	%o0, 1, %o0	! IEU1
+	add	%o1, 1, %o1	! IEU0	Group
+	subcc	%g7, %g3, %g3	! IEU1	Group
+	be,pt	%icc, loop	! CTI
+	 cmp	%o2, 0		! IEU1	Group
+
+ret_n0:	retl
+	 mov	%g3, %o0
+ret_0:	retl
+	 mov	0, %o0
diff --git a/arch/sparc64/lib/memmove.S b/arch/sparc64/lib/memmove.S
new file mode 100644
index 0000000..9739580
--- /dev/null
+++ b/arch/sparc64/lib/memmove.S
@@ -0,0 +1,31 @@
+/* memmove.S: Simple memmove implementation.
+ *
+ * Copyright (C) 1997, 2004 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1996, 1997, 1998, 1999 Jakub Jelinek (jj@ultra.linux.cz)
+ */
+
+	.text
+	.align		32
+	.globl		memmove
+	.type		memmove,#function
+memmove:		/* o0=dst o1=src o2=len */
+	mov		%o0, %g1
+	cmp		%o0, %o1
+	bleu,pt		%xcc, memcpy
+	 add		%o1, %o2, %g7
+	cmp		%g7, %o0
+	bleu,pt		%xcc, memcpy
+	 add		%o0, %o2, %o5
+	sub		%g7, 1, %o1
+
+	sub		%o5, 1, %o0
+1:	ldub		[%o1], %g7
+	subcc		%o2, 1, %o2
+	sub		%o1, 1, %o1
+	stb		%g7, [%o0]
+	bne,pt		%icc, 1b
+	 sub		%o0, 1, %o0
+
+	retl
+	 mov		%g1, %o0
+	.size		memmove, .-memmove
diff --git a/arch/sparc64/lib/memscan.S b/arch/sparc64/lib/memscan.S
new file mode 100644
index 0000000..5e72d49
--- /dev/null
+++ b/arch/sparc64/lib/memscan.S
@@ -0,0 +1,129 @@
+/* $Id: memscan.S,v 1.3 2000/01/31 04:59:10 davem Exp $
+ * memscan.S: Optimized memscan for Sparc64.
+ *
+ * Copyright (C) 1997,1998 Jakub Jelinek (jj@ultra.linux.cz)
+ * Copyright (C) 1998 David S. Miller (davem@redhat.com)
+ */
+
+#define HI_MAGIC	0x8080808080808080
+#define LO_MAGIC	0x0101010101010101
+#define ASI_PL		0x88
+
+	.text
+	.align	32
+	.globl		__memscan_zero, __memscan_generic
+	.globl		memscan
+
+__memscan_zero:
+	/* %o0 = bufp, %o1 = size */
+	brlez,pn	%o1, szzero
+	 andcc		%o0, 7, %g0
+	be,pt		%icc, we_are_aligned
+	 sethi		%hi(HI_MAGIC), %o4
+	ldub		[%o0], %o5
+1:	subcc		%o1, 1, %o1
+	brz,pn		%o5, 10f
+	 add		%o0, 1, %o0
+
+	be,pn		%xcc, szzero
+	 andcc		%o0, 7, %g0
+	bne,a,pn	%icc, 1b
+	 ldub		[%o0], %o5
+we_are_aligned:
+	ldxa		[%o0] ASI_PL, %o5
+	or		%o4, %lo(HI_MAGIC), %o3
+	sllx		%o3, 32, %o4
+	or		%o4, %o3, %o3
+
+	srlx		%o3, 7, %o2
+msloop:
+	sub		%o1, 8, %o1
+	add		%o0, 8, %o0
+	sub		%o5, %o2, %o4
+	xor		%o4, %o5, %o4
+	andcc		%o4, %o3, %g3
+	bne,pn		%xcc, check_bytes
+	 srlx		%o4, 32, %g3
+
+	brgz,a,pt	%o1, msloop
+	 ldxa		[%o0] ASI_PL, %o5
+check_bytes:
+	bne,a,pn	%icc, 2f
+	 andcc		%o5, 0xff, %g0
+	add		%o0, -5, %g2
+	ba,pt		%xcc, 3f
+	 srlx		%o5, 32, %g7
+
+2:	srlx		%o5, 8, %g7
+	be,pn		%icc, 1f
+	 add		%o0, -8, %g2
+	andcc		%g7, 0xff, %g0
+	srlx		%g7, 8, %g7
+	be,pn		%icc, 1f
+	 inc		%g2
+	andcc		%g7, 0xff, %g0
+
+	srlx		%g7, 8, %g7
+	be,pn		%icc, 1f
+	 inc		%g2
+	andcc		%g7, 0xff, %g0
+	srlx		%g7, 8, %g7
+	be,pn		%icc, 1f
+	 inc		%g2
+	andcc		%g3, %o3, %g0
+
+	be,a,pn		%icc, 2f
+	 mov		%o0, %g2
+3:	andcc		%g7, 0xff, %g0
+	srlx		%g7, 8, %g7
+	be,pn		%icc, 1f
+	 inc		%g2
+	andcc		%g7, 0xff, %g0
+	srlx		%g7, 8, %g7
+
+	be,pn		%icc, 1f
+	 inc		%g2
+	andcc		%g7, 0xff, %g0
+	srlx		%g7, 8, %g7
+	be,pn		%icc, 1f
+	 inc		%g2
+	andcc		%g7, 0xff, %g0
+	srlx		%g7, 8, %g7
+
+	be,pn		%icc, 1f
+	 inc		%g2
+2:	brgz,a,pt	%o1, msloop
+	 ldxa		[%o0] ASI_PL, %o5
+	inc		%g2
+1:	add		%o0, %o1, %o0
+	cmp		%g2, %o0
+	retl
+
+	 movle		%xcc, %g2, %o0
+10:	retl
+	 sub		%o0, 1, %o0
+szzero:	retl
+	 nop
+
+memscan:
+__memscan_generic:
+	/* %o0 = addr, %o1 = c, %o2 = size */
+	brz,pn		%o2, 3f
+	 add		%o0, %o2, %o3
+	ldub		[%o0], %o5
+	sub		%g0, %o2, %o4
+1:
+	cmp		%o5, %o1
+	be,pn		%icc, 2f
+	 addcc		%o4, 1, %o4
+	bne,a,pt 	%xcc, 1b
+	 ldub		[%o3 + %o4], %o5
+	retl
+	/* The delay slot is the same as the next insn, this is just to make it look more awful */
+2:
+	 add		%o3, %o4, %o0
+	retl
+	 sub		%o0, 1, %o0
+3:
+	retl
+	 nop
diff --git a/arch/sparc64/lib/rwsem.S b/arch/sparc64/lib/rwsem.S
new file mode 100644
index 0000000..174ff7b
--- /dev/null
+++ b/arch/sparc64/lib/rwsem.S
@@ -0,0 +1,165 @@
+/* rwsem.S: RW semaphore assembler.
+ *
+ * Written by David S. Miller (davem@redhat.com), 2001.
+ * Derived from asm-i386/rwsem.h
+ */
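+/* The fast paths below just update the 32-bit count at %o0 atomically
+ * and only call into the C slow-path helpers when the count indicates
+ * contention.  __down_read, for instance, is roughly (a sketch):
+ *
+ *	if (atomic_add_return(1, &sem->count) < 0)
+ *		rwsem_down_read_failed(sem);
+ *
+ * with the bias constants coming from asm/rwsem-const.h.
+ */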
+
+#include <asm/rwsem-const.h>
+
+	.section	.sched.text
+
+	.globl		__down_read
+__down_read:
+1:	lduw		[%o0], %g1
+	add		%g1, 1, %g7
+	cas		[%o0], %g1, %g7
+	cmp		%g1, %g7
+	bne,pn		%icc, 1b
+	 add		%g7, 1, %g7
+	cmp		%g7, 0
+	bl,pn		%icc, 3f
+	 membar		#StoreLoad | #StoreStore
+2:
+	retl
+	 nop
+3:
+	save		%sp, -192, %sp
+	call		rwsem_down_read_failed
+	 mov		%i0, %o0
+	ret
+	 restore
+	.size		__down_read, .-__down_read
+
+	.globl		__down_read_trylock
+__down_read_trylock:
+1:	lduw		[%o0], %g1
+	add		%g1, 1, %g7
+	cmp		%g7, 0
+	bl,pn		%icc, 2f
+	 mov		0, %o1
+	cas		[%o0], %g1, %g7
+	cmp		%g1, %g7
+	bne,pn		%icc, 1b
+	 mov		1, %o1
+	membar		#StoreLoad | #StoreStore
+2:	retl
+	 mov		%o1, %o0
+	.size		__down_read_trylock, .-__down_read_trylock
+
+	.globl		__down_write
+__down_write:
+	sethi		%hi(RWSEM_ACTIVE_WRITE_BIAS), %g1
+	or		%g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
+1:
+	lduw		[%o0], %g3
+	add		%g3, %g1, %g7
+	cas		[%o0], %g3, %g7
+	cmp		%g3, %g7
+	bne,pn		%icc, 1b
+	 cmp		%g7, 0
+	bne,pn		%icc, 3f
+	 membar		#StoreLoad | #StoreStore
+2:	retl
+	 nop
+3:
+	save		%sp, -192, %sp
+	call		rwsem_down_write_failed
+	 mov		%i0, %o0
+	ret
+	 restore
+	.size		__down_write, .-__down_write
+
+	.globl		__down_write_trylock
+__down_write_trylock:
+	sethi		%hi(RWSEM_ACTIVE_WRITE_BIAS), %g1
+	or		%g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
+1:
+	lduw		[%o0], %g3
+	cmp		%g3, 0
+	bne,pn		%icc, 2f
+	 mov		0, %o1
+	add		%g3, %g1, %g7
+	cas		[%o0], %g3, %g7
+	cmp		%g3, %g7
+	bne,pn		%icc, 1b
+	 mov		1, %o1
+	membar		#StoreLoad | #StoreStore
+2:	retl
+	 mov		%o1, %o0
+	.size		__down_write_trylock, .-__down_write_trylock
+
+	.globl		__up_read
+__up_read:
+1:
+	lduw		[%o0], %g1
+	sub		%g1, 1, %g7
+	cas		[%o0], %g1, %g7
+	cmp		%g1, %g7
+	bne,pn		%icc, 1b
+	 cmp		%g7, 0
+	bl,pn		%icc, 3f
+	 membar		#StoreLoad | #StoreStore
+2:	retl
+	 nop
+3:	sethi		%hi(RWSEM_ACTIVE_MASK), %g1
+	sub		%g7, 1, %g7
+	or		%g1, %lo(RWSEM_ACTIVE_MASK), %g1
+	andcc		%g7, %g1, %g0
+	bne,pn		%icc, 2b
+	 nop
+	save		%sp, -192, %sp
+	call		rwsem_wake
+	 mov		%i0, %o0
+	ret
+	 restore
+	.size		__up_read, .-__up_read
+
+	.globl		__up_write
+__up_write:
+	sethi		%hi(RWSEM_ACTIVE_WRITE_BIAS), %g1
+	or		%g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
+1:
+	lduw		[%o0], %g3
+	sub		%g3, %g1, %g7
+	cas		[%o0], %g3, %g7
+	cmp		%g3, %g7
+	bne,pn		%icc, 1b
+	 sub		%g7, %g1, %g7
+	cmp		%g7, 0
+	bl,pn		%icc, 3f
+	 membar		#StoreLoad | #StoreStore
+2:
+	retl
+	 nop
+3:
+	save		%sp, -192, %sp
+	call		rwsem_wake
+	 mov		%i0, %o0
+	ret
+	 restore
+	.size		__up_write, .-__up_write
+
+	.globl		__downgrade_write
+__downgrade_write:
+	sethi		%hi(RWSEM_WAITING_BIAS), %g1
+	or		%g1, %lo(RWSEM_WAITING_BIAS), %g1
+1:
+	lduw		[%o0], %g3
+	sub		%g3, %g1, %g7
+	cas		[%o0], %g3, %g7
+	cmp		%g3, %g7
+	bne,pn		%icc, 1b
+	 sub		%g7, %g1, %g7
+	cmp		%g7, 0
+	bl,pn		%icc, 3f
+	 membar		#StoreLoad | #StoreStore
+2:
+	retl
+	 nop
+3:
+	save		%sp, -192, %sp
+	call		rwsem_downgrade_wake
+	 mov		%i0, %o0
+	ret
+	 restore
+	.size		__downgrade_write, .-__downgrade_write
diff --git a/arch/sparc64/lib/strlen.S b/arch/sparc64/lib/strlen.S
new file mode 100644
index 0000000..e9ba192
--- /dev/null
+++ b/arch/sparc64/lib/strlen.S
@@ -0,0 +1,80 @@
+/* strlen.S: Sparc64 optimized strlen code
+ * Hand optimized from GNU libc's strlen
+ * Copyright (C) 1991,1996 Free Software Foundation
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996, 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#define LO_MAGIC 0x01010101
+#define HI_MAGIC 0x80808080
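+/* The word-at-a-time loop below uses the classic "(w - LO_MAGIC) &
+ * HI_MAGIC" test: it is guaranteed to fire when w contains a zero byte,
+ * and may also fire spuriously for bytes with the top bit set, which is
+ * why a hit falls through to the byte-by-byte recheck further down.
+ */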
+
+	.align	32
+	.globl	strlen
+	.type	strlen,#function
+strlen:
+	mov	%o0, %o1
+	andcc	%o0, 3, %g0
+	be,pt	%icc, 9f
+	 sethi	%hi(HI_MAGIC), %o4
+	ldub	[%o0], %o5
+	brz,pn	%o5, 11f
+	 add	%o0, 1, %o0
+	andcc	%o0, 3, %g0
+	be,pn	%icc, 4f
+	 or	%o4, %lo(HI_MAGIC), %o3
+	ldub	[%o0], %o5
+	brz,pn	%o5, 12f
+	 add	%o0, 1, %o0
+	andcc	%o0, 3, %g0
+	be,pt	%icc, 5f
+	 sethi	%hi(LO_MAGIC), %o4
+	ldub	[%o0], %o5
+	brz,pn	%o5, 13f
+	 add	%o0, 1, %o0
+	ba,pt	%icc, 8f
+	 or	%o4, %lo(LO_MAGIC), %o2
+9:
+	or	%o4, %lo(HI_MAGIC), %o3
+4:
+	sethi	%hi(LO_MAGIC), %o4
+5:
+	or	%o4, %lo(LO_MAGIC), %o2
+8:
+	ld	[%o0], %o5
+2:
+	sub	%o5, %o2, %o4
+	andcc	%o4, %o3, %g0
+	be,pt	%icc, 8b
+	 add	%o0, 4, %o0
+
+	/* Check every byte. */
+	srl	%o5, 24, %g7
+	andcc	%g7, 0xff, %g0
+	be,pn	%icc, 1f
+	 add	%o0, -4, %o4
+	srl	%o5, 16, %g7
+	andcc	%g7, 0xff, %g0
+	be,pn	%icc, 1f
+	 add	%o4, 1, %o4
+	srl	%o5, 8, %g7
+	andcc	%g7, 0xff, %g0
+	be,pn	%icc, 1f
+	 add	%o4, 1, %o4
+	andcc	%o5, 0xff, %g0
+	bne,a,pt %icc, 2b
+	 ld	[%o0], %o5
+	add	%o4, 1, %o4
+1:
+	retl
+	 sub	%o4, %o1, %o0
+11:
+	retl
+	 mov	0, %o0
+12:
+	retl
+	 mov	1, %o0
+13:
+	retl
+	 mov	2, %o0
+
+	.size	strlen, .-strlen
diff --git a/arch/sparc64/lib/strlen_user.S b/arch/sparc64/lib/strlen_user.S
new file mode 100644
index 0000000..9ed54ba
--- /dev/null
+++ b/arch/sparc64/lib/strlen_user.S
@@ -0,0 +1,95 @@
+/* strlen_user.S: Sparc64 optimized strlen_user code
+ *
+ * Return the length of a string in userspace, including the
+ * terminating 0, or 0 on error.
+ *
+ * Copyright (C) 1991,1996 Free Software Foundation
+ * Copyright (C) 1996,1999 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#include <asm/asi.h>
+
+#define LO_MAGIC 0x01010101
+#define HI_MAGIC 0x80808080
+
+	.align 4
+	.global __strlen_user, __strnlen_user
+__strlen_user:
+	sethi	%hi(32768), %o1
+__strnlen_user:	
+	mov	%o1, %g1
+	mov	%o0, %o1
+	andcc	%o0, 3, %g0
+	be,pt	%icc, 9f
+	 sethi	%hi(HI_MAGIC), %o4
+10:	lduba	[%o0] %asi, %o5
+	brz,pn	%o5, 21f
+	 add	%o0, 1, %o0
+	andcc	%o0, 3, %g0
+	be,pn	%icc, 4f
+	 or	%o4, %lo(HI_MAGIC), %o3
+11:	lduba	[%o0] %asi, %o5
+	brz,pn	%o5, 22f
+	 add	%o0, 1, %o0
+	andcc	%o0, 3, %g0
+	be,pt	%icc, 13f
+	 srl	%o3, 7, %o2
+12:	lduba	[%o0] %asi, %o5
+	brz,pn	%o5, 23f
+	 add	%o0, 1, %o0
+	ba,pt	%icc, 2f
+15:	 lda	[%o0] %asi, %o5
+9:	or	%o4, %lo(HI_MAGIC), %o3
+4:	srl	%o3, 7, %o2
+13:	lda	[%o0] %asi, %o5
+2:	sub	%o5, %o2, %o4
+	andcc	%o4, %o3, %g0
+	bne,pn	%icc, 82f
+	 add	%o0, 4, %o0
+	sub	%o0, %o1, %g2
+81:	cmp	%g2, %g1
+	blu,pt	%icc, 13b
+	 mov	%o0, %o4
+	ba,a,pt	%xcc, 1f
+
+	/* Check every byte. */
+82:	srl	%o5, 24, %g7
+	andcc	%g7, 0xff, %g0
+	be,pn	%icc, 1f
+	 add	%o0, -3, %o4
+	srl	%o5, 16, %g7
+	andcc	%g7, 0xff, %g0
+	be,pn	%icc, 1f
+	 add	%o4, 1, %o4
+	srl	%o5, 8, %g7
+	andcc	%g7, 0xff, %g0
+	be,pn	%icc, 1f
+	 add	%o4, 1, %o4
+	andcc	%o5, 0xff, %g0
+	bne,pt	%icc, 81b
+	 sub	%o0, %o1, %g2
+	add	%o4, 1, %o4
+1:	retl
+	 sub	%o4, %o1, %o0
+21:	retl
+	 mov	1, %o0
+22:	retl
+	 mov	2, %o0
+23:	retl
+	 mov	3, %o0
+
+        .section .fixup,#alloc,#execinstr
+        .align  4
+30:
+        retl
+         clr    %o0
+
+	.section __ex_table,#alloc
+	.align	4
+
+	.word	10b, 30b
+	.word	11b, 30b
+	.word	12b, 30b
+	.word	15b, 30b
+	.word	13b, 30b
diff --git a/arch/sparc64/lib/strncmp.S b/arch/sparc64/lib/strncmp.S
new file mode 100644
index 0000000..6f14f53
--- /dev/null
+++ b/arch/sparc64/lib/strncmp.S
@@ -0,0 +1,32 @@
+/* $Id: strncmp.S,v 1.2 1997/03/11 17:51:44 jj Exp $
+ * Sparc64 optimized strncmp code.
+ *
+ * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#include <asm/asi.h>
+
+	.text
+	.align	32
+	.globl	strncmp
+	.type	strncmp,#function
+strncmp:
+	brlez,pn %o2, 3f
+	 lduba	[%o0] (ASI_PNF), %o3
+1:
+	add	%o0, 1, %o0
+	ldub	[%o1], %o4
+	brz,pn	%o3, 2f
+	 add	%o1, 1, %o1
+	cmp	%o3, %o4
+	bne,pn	%icc, 2f
+	 subcc	%o2, 1, %o2
+	bne,a,pt %xcc, 1b
+	 ldub	[%o0], %o3
+2:
+	retl
+	 sub	%o3, %o4, %o0
+3:
+	retl
+	 clr	%o0
+	.size	strncmp, .-strncmp
diff --git a/arch/sparc64/lib/strncpy_from_user.S b/arch/sparc64/lib/strncpy_from_user.S
new file mode 100644
index 0000000..09cbbaa
--- /dev/null
+++ b/arch/sparc64/lib/strncpy_from_user.S
@@ -0,0 +1,139 @@
+/* $Id: strncpy_from_user.S,v 1.6 1999/05/25 16:53:05 jj Exp $
+ * strncpy_from_user.S: Sparc64 strncpy from userspace.
+ *
+ *  Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz)
+ */
+
+#include <asm/asi.h>
+#include <asm/errno.h>
+
+	.data
+	.align	8
+0:	.xword	0x0101010101010101
+
+	.text
+	.align	32
+
+	/* Must return:
+	 *
+	 * -EFAULT		for an exception
+	 * count		if we hit the buffer limit
+	 * bytes copied		if we hit a null byte
+	 * (without the null byte)
+	 *
+	 * This implementation assumes:
+	 * %o1 is 8 aligned => !(%o2 & 7)
+	 * %o0 is 8 aligned (if not, it will be slooooow, but will work)
+	 *
+	 * This is optimized for the common case:
+	 * in my stats, 90% of src pointers are 8-byte aligned (even on sparc32)
+	 * and the average length is 18 or so.
+	 */
+
+	.globl	__strncpy_from_user
+	.type	__strncpy_from_user,#function
+__strncpy_from_user:
+	/* %o0=dest, %o1=src, %o2=count */
+	andcc	%o1, 7, %g0		! IEU1	Group
+	bne,pn	%icc, 30f		! CTI
+	 add	%o0, %o2, %g3		! IEU0
+60:	ldxa	[%o1] %asi, %g1		! Load	Group
+	brlez,pn %o2, 10f		! CTI
+	 mov	%o0, %o3		! IEU0
+50:	sethi	%hi(0b), %o4		! IEU0	Group
+	ldx	[%o4 + %lo(0b)], %o4	! Load
+	sllx	%o4, 7, %o5		! IEU1	Group
+1:	sub	%g1, %o4, %g2		! IEU0	Group
+	stx	%g1, [%o0]		! Store
+	add	%o0, 8, %o0		! IEU1
+	andcc	%g2, %o5, %g0		! IEU1	Group
+	bne,pn	%xcc, 5f		! CTI
+	 add	%o1, 8, %o1		! IEU0
+	cmp	%o0, %g3		! IEU1	Group
+	bl,a,pt %xcc, 1b		! CTI
+61:	 ldxa	[%o1] %asi, %g1		! Load
+10:	retl				! CTI	Group
+	 mov	%o2, %o0		! IEU0
+5:	srlx	%g2, 32, %g7		! IEU0	Group
+	sethi	%hi(0xff00), %o4	! IEU1
+	andcc	%g7, %o5, %g0		! IEU1	Group
+	be,pn	%icc, 2f		! CTI
+	 or	%o4, %lo(0xff00), %o4	! IEU0
+	srlx	%g1, 48, %g7		! IEU0	Group
+	andcc	%g7, %o4, %g0		! IEU1	Group
+	be,pn	%icc, 50f		! CTI
+	 andcc	%g7, 0xff, %g0		! IEU1	Group
+	be,pn	%icc, 51f		! CTI
+	 srlx	%g1, 32, %g7		! IEU0
+	andcc	%g7, %o4, %g0		! IEU1	Group
+	be,pn	%icc, 52f		! CTI
+	 andcc	%g7, 0xff, %g0		! IEU1	Group
+	be,pn	%icc, 53f		! CTI
+2:	 andcc	%g2, %o5, %g0		! IEU1	Group
+	be,pn	%icc, 2f		! CTI
+	 srl	%g1, 16, %g7		! IEU0
+	andcc	%g7, %o4, %g0		! IEU1	Group
+	be,pn	%icc, 54f		! CTI
+	 andcc	%g7, 0xff, %g0		! IEU1	Group
+	be,pn	%icc, 55f		! CTI
+	 andcc	%g1, %o4, %g0		! IEU1	Group
+	be,pn	%icc, 56f		! CTI
+	 andcc	%g1, 0xff, %g0		! IEU1	Group
+	be,a,pn	%icc, 57f		! CTI
+	 sub	%o0, %o3, %o0		! IEU0
+2:	cmp	%o0, %g3		! IEU1	Group
+	bl,a,pt	%xcc, 50b		! CTI
+62:	 ldxa	[%o1] %asi, %g1		! Load
+	retl				! CTI	Group
+	 mov	%o2, %o0		! IEU0
+50:	sub	%o0, %o3, %o0
+	retl
+	 sub	%o0, 8, %o0
+51:	sub	%o0, %o3, %o0
+	retl
+	 sub	%o0, 7, %o0
+52:	sub	%o0, %o3, %o0
+	retl
+	 sub	%o0, 6, %o0
+53:	sub	%o0, %o3, %o0
+	retl
+	 sub	%o0, 5, %o0
+54:	sub	%o0, %o3, %o0
+	retl
+	 sub	%o0, 4, %o0
+55:	sub	%o0, %o3, %o0
+	retl
+	 sub	%o0, 3, %o0
+56:	sub	%o0, %o3, %o0
+	retl
+	 sub	%o0, 2, %o0
+57:	retl
+	 sub	%o0, 1, %o0
+30:	brlez,pn %o2, 3f
+	 sub	%g0, %o2, %o3
+	add	%o0, %o2, %o0
+63:	lduba	[%o1] %asi, %o4
+1:	add	%o1, 1, %o1
+	brz,pn	%o4, 2f
+	 stb	%o4, [%o0 + %o3]
+	addcc	%o3, 1, %o3
+	bne,pt	%xcc, 1b
+64:	 lduba	[%o1] %asi, %o4
+3:	retl
+	 mov	%o2, %o0
+2:	retl
+	 add	%o2, %o3, %o0
+	.size	__strncpy_from_user, .-__strncpy_from_user
+
+	.section .fixup,#alloc,#execinstr
+	.align	4
+4:	retl
+	 mov	-EFAULT, %o0
+
+	.section __ex_table,#alloc
+	.align	4
+	.word	60b, 4b
+	.word	61b, 4b
+	.word	62b, 4b
+	.word	63b, 4b
+	.word	64b, 4b
diff --git a/arch/sparc64/lib/user_fixup.c b/arch/sparc64/lib/user_fixup.c
new file mode 100644
index 0000000..0278e34
--- /dev/null
+++ b/arch/sparc64/lib/user_fixup.c
@@ -0,0 +1,71 @@
+/* user_fixup.c: Fix up user copy faults.
+ *
+ * Copyright (C) 2004 David S. Miller <davem@redhat.com>
+ */
+
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+
+/* Calculating the exact fault address when using
+ * block loads and stores can be very complicated.
+ * Instead of trying to be clever and handling all
+ * of the cases, just fix things up simply here.
+ */
+
+unsigned long copy_from_user_fixup(void *to, const void __user *from, unsigned long size)
+{
+	char *dst = to;
+	const char __user *src = from;
+
+	while (size) {
+		if (__get_user(*dst, src))
+			break;
+		dst++;
+		src++;
+		size--;
+	}
+
+	if (size)
+		memset(dst, 0, size);
+
+	return size;
+}
+
+unsigned long copy_to_user_fixup(void __user *to, const void *from, unsigned long size)
+{
+	char __user *dst = to;
+	const char *src = from;
+
+	while (size) {
+		if (__put_user(*src, dst))
+			break;
+		dst++;
+		src++;
+		size--;
+	}
+
+	return size;
+}
+
+unsigned long copy_in_user_fixup(void __user *to, void __user *from, unsigned long size)
+{
+	char __user *dst = to;
+	char __user *src = from;
+
+	while (size) {
+		char tmp;
+
+		if (__get_user(tmp, src))
+			break;
+		if (__put_user(tmp, dst))
+			break;
+		dst++;
+		src++;
+		size--;
+	}
+
+	return size;
+}
diff --git a/arch/sparc64/lib/xor.S b/arch/sparc64/lib/xor.S
new file mode 100644
index 0000000..4cd5d2b
--- /dev/null
+++ b/arch/sparc64/lib/xor.S
@@ -0,0 +1,354 @@
+/*
+ * arch/sparc64/lib/xor.S
+ *
+ * High speed xor_block operation for RAID4/5 utilizing the
+ * UltraSparc Visual Instruction Set.
+ *
+ * Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz)
+ */
+
+#include <asm/visasm.h>
+#include <asm/asi.h>
+#include <asm/dcu.h>
+#include <asm/spitfire.h>
+
+/*
+ *	Requirements:
+ *	!(((long)dest | (long)sourceN) & (64 - 1)) &&
+ *	!(len & 127) && len >= 256
+ */
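+/*
+ * Each xor_vis_N(bytes, p1, ..., pN) below computes, 64 bytes at a time
+ * with VIS block loads and stores, the equivalent of (sketch):
+ *
+ *	for (i = 0; i < bytes / 8; i++)
+ *		p1[i] ^= p2[i] ^ ... ^ pN[i];	(u64 elements)
+ *
+ * i.e. the destination p1 accumulates the xor of all source buffers.
+ */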
+	.text
+	.align	32
+	.globl	xor_vis_2
+	.type	xor_vis_2,#function
+xor_vis_2:
+	rd	%fprs, %o5
+	andcc	%o5, FPRS_FEF|FPRS_DU, %g0
+	be,pt	%icc, 0f
+	 sethi	%hi(VISenter), %g1
+	jmpl	%g1 + %lo(VISenter), %g7
+	 add	%g7, 8, %g7
+0:	wr	%g0, FPRS_FEF, %fprs
+	rd	%asi, %g1
+	wr	%g0, ASI_BLK_P, %asi
+	membar	#LoadStore|#StoreLoad|#StoreStore
+	sub	%o0, 128, %o0
+	ldda	[%o1] %asi, %f0
+	ldda	[%o2] %asi, %f16
+
+2:	ldda	[%o1 + 64] %asi, %f32
+	fxor	%f0, %f16, %f16
+	fxor	%f2, %f18, %f18
+	fxor	%f4, %f20, %f20
+	fxor	%f6, %f22, %f22
+	fxor	%f8, %f24, %f24
+	fxor	%f10, %f26, %f26
+	fxor	%f12, %f28, %f28
+	fxor	%f14, %f30, %f30
+	stda	%f16, [%o1] %asi
+	ldda	[%o2 + 64] %asi, %f48
+	ldda	[%o1 + 128] %asi, %f0
+	fxor	%f32, %f48, %f48
+	fxor	%f34, %f50, %f50
+	add	%o1, 128, %o1
+	fxor	%f36, %f52, %f52
+	add	%o2, 128, %o2
+	fxor	%f38, %f54, %f54
+	subcc	%o0, 128, %o0
+	fxor	%f40, %f56, %f56
+	fxor	%f42, %f58, %f58
+	fxor	%f44, %f60, %f60
+	fxor	%f46, %f62, %f62
+	stda	%f48, [%o1 - 64] %asi
+	bne,pt	%xcc, 2b
+	 ldda	[%o2] %asi, %f16
+
+	ldda	[%o1 + 64] %asi, %f32
+	fxor	%f0, %f16, %f16
+	fxor	%f2, %f18, %f18
+	fxor	%f4, %f20, %f20
+	fxor	%f6, %f22, %f22
+	fxor	%f8, %f24, %f24
+	fxor	%f10, %f26, %f26
+	fxor	%f12, %f28, %f28
+	fxor	%f14, %f30, %f30
+	stda	%f16, [%o1] %asi
+	ldda	[%o2 + 64] %asi, %f48
+	membar	#Sync
+	fxor	%f32, %f48, %f48
+	fxor	%f34, %f50, %f50
+	fxor	%f36, %f52, %f52
+	fxor	%f38, %f54, %f54
+	fxor	%f40, %f56, %f56
+	fxor	%f42, %f58, %f58
+	fxor	%f44, %f60, %f60
+	fxor	%f46, %f62, %f62
+	stda	%f48, [%o1 + 64] %asi
+	membar	#Sync|#StoreStore|#StoreLoad
+	wr	%g1, %g0, %asi
+	retl
+	  wr	%g0, 0, %fprs
+	.size	xor_vis_2, .-xor_vis_2
+
+	.globl	xor_vis_3
+	.type	xor_vis_3,#function
+xor_vis_3:
+	rd	%fprs, %o5
+	andcc	%o5, FPRS_FEF|FPRS_DU, %g0
+	be,pt	%icc, 0f
+	 sethi	%hi(VISenter), %g1
+	jmpl	%g1 + %lo(VISenter), %g7
+	 add	%g7, 8, %g7
+0:	wr	%g0, FPRS_FEF, %fprs
+	rd	%asi, %g1
+	wr	%g0, ASI_BLK_P, %asi
+	membar	#LoadStore|#StoreLoad|#StoreStore
+	sub	%o0, 64, %o0
+	ldda	[%o1] %asi, %f0
+	ldda	[%o2] %asi, %f16
+
+3:	ldda	[%o3] %asi, %f32
+	fxor	%f0, %f16, %f48
+	fxor	%f2, %f18, %f50
+	add	%o1, 64, %o1
+	fxor	%f4, %f20, %f52
+	fxor	%f6, %f22, %f54
+	add	%o2, 64, %o2
+	fxor	%f8, %f24, %f56
+	fxor	%f10, %f26, %f58
+	fxor	%f12, %f28, %f60
+	fxor	%f14, %f30, %f62
+	ldda	[%o1] %asi, %f0
+	fxor	%f48, %f32, %f48
+	fxor	%f50, %f34, %f50
+	fxor	%f52, %f36, %f52
+	fxor	%f54, %f38, %f54
+	add	%o3, 64, %o3
+	fxor	%f56, %f40, %f56
+	fxor	%f58, %f42, %f58
+	subcc	%o0, 64, %o0
+	fxor	%f60, %f44, %f60
+	fxor	%f62, %f46, %f62
+	stda	%f48, [%o1 - 64] %asi
+	bne,pt	%xcc, 3b
+	 ldda	[%o2] %asi, %f16
+
+	ldda	[%o3] %asi, %f32
+	fxor	%f0, %f16, %f48
+	fxor	%f2, %f18, %f50
+	fxor	%f4, %f20, %f52
+	fxor	%f6, %f22, %f54
+	fxor	%f8, %f24, %f56
+	fxor	%f10, %f26, %f58
+	fxor	%f12, %f28, %f60
+	fxor	%f14, %f30, %f62
+	membar	#Sync
+	fxor	%f48, %f32, %f48
+	fxor	%f50, %f34, %f50
+	fxor	%f52, %f36, %f52
+	fxor	%f54, %f38, %f54
+	fxor	%f56, %f40, %f56
+	fxor	%f58, %f42, %f58
+	fxor	%f60, %f44, %f60
+	fxor	%f62, %f46, %f62
+	stda	%f48, [%o1] %asi
+	membar	#Sync|#StoreStore|#StoreLoad
+	wr	%g1, %g0, %asi
+	retl
+	 wr	%g0, 0, %fprs
+	.size	xor_vis_3, .-xor_vis_3
+
+	.globl	xor_vis_4
+	.type	xor_vis_4,#function
+xor_vis_4:
+	rd	%fprs, %o5
+	andcc	%o5, FPRS_FEF|FPRS_DU, %g0
+	be,pt	%icc, 0f
+	 sethi	%hi(VISenter), %g1
+	jmpl	%g1 + %lo(VISenter), %g7
+	 add	%g7, 8, %g7
+0:	wr	%g0, FPRS_FEF, %fprs
+	rd	%asi, %g1
+	wr	%g0, ASI_BLK_P, %asi
+	membar	#LoadStore|#StoreLoad|#StoreStore
+	sub	%o0, 64, %o0
+	ldda	[%o1] %asi, %f0
+	ldda	[%o2] %asi, %f16
+
+4:	ldda	[%o3] %asi, %f32
+	fxor	%f0, %f16, %f16
+	fxor	%f2, %f18, %f18
+	add	%o1, 64, %o1
+	fxor	%f4, %f20, %f20
+	fxor	%f6, %f22, %f22
+	add	%o2, 64, %o2
+	fxor	%f8, %f24, %f24
+	fxor	%f10, %f26, %f26
+	fxor	%f12, %f28, %f28
+	fxor	%f14, %f30, %f30
+	ldda	[%o4] %asi, %f48
+	fxor	%f16, %f32, %f32
+	fxor	%f18, %f34, %f34
+	fxor	%f20, %f36, %f36
+	fxor	%f22, %f38, %f38
+	add	%o3, 64, %o3
+	fxor	%f24, %f40, %f40
+	fxor	%f26, %f42, %f42
+	fxor	%f28, %f44, %f44
+	fxor	%f30, %f46, %f46
+	ldda	[%o1] %asi, %f0
+	fxor	%f32, %f48, %f48
+	fxor	%f34, %f50, %f50
+	fxor	%f36, %f52, %f52
+	add	%o4, 64, %o4
+	fxor	%f38, %f54, %f54
+	fxor	%f40, %f56, %f56
+	fxor	%f42, %f58, %f58
+	subcc	%o0, 64, %o0
+	fxor	%f44, %f60, %f60
+	fxor	%f46, %f62, %f62
+	stda	%f48, [%o1 - 64] %asi
+	bne,pt	%xcc, 4b
+	 ldda	[%o2] %asi, %f16
+
+	ldda	[%o3] %asi, %f32
+	fxor	%f0, %f16, %f16
+	fxor	%f2, %f18, %f18
+	fxor	%f4, %f20, %f20
+	fxor	%f6, %f22, %f22
+	fxor	%f8, %f24, %f24
+	fxor	%f10, %f26, %f26
+	fxor	%f12, %f28, %f28
+	fxor	%f14, %f30, %f30
+	ldda	[%o4] %asi, %f48
+	fxor	%f16, %f32, %f32
+	fxor	%f18, %f34, %f34
+	fxor	%f20, %f36, %f36
+	fxor	%f22, %f38, %f38
+	fxor	%f24, %f40, %f40
+	fxor	%f26, %f42, %f42
+	fxor	%f28, %f44, %f44
+	fxor	%f30, %f46, %f46
+	membar	#Sync
+	fxor	%f32, %f48, %f48
+	fxor	%f34, %f50, %f50
+	fxor	%f36, %f52, %f52
+	fxor	%f38, %f54, %f54
+	fxor	%f40, %f56, %f56
+	fxor	%f42, %f58, %f58
+	fxor	%f44, %f60, %f60
+	fxor	%f46, %f62, %f62
+	stda	%f48, [%o1] %asi
+	membar	#Sync|#StoreStore|#StoreLoad
+	wr	%g1, %g0, %asi
+	retl
+	 wr	%g0, 0, %fprs
+	.size	xor_vis_4, .-xor_vis_4
+
+	.globl	xor_vis_5
+	.type	xor_vis_5,#function
+xor_vis_5:
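+	/* Unlike the other variants, xor_vis_5 takes six arguments
+	 * (length plus five buffer pointers), which fills %o0-%o5.
+	 * Grab a register window so the arguments can be used as
+	 * %i0-%i5 and the fresh %o registers serve as scratch; the
+	 * epilogue pairs this with ret/restore instead of retl.
+	 */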
+	save	%sp, -192, %sp
+	rd	%fprs, %o5
+	andcc	%o5, FPRS_FEF|FPRS_DU, %g0
+	be,pt	%icc, 0f
+	 sethi	%hi(VISenter), %g1
+	jmpl	%g1 + %lo(VISenter), %g7
+	 add	%g7, 8, %g7
+0:	wr	%g0, FPRS_FEF, %fprs
+	rd	%asi, %g1
+	wr	%g0, ASI_BLK_P, %asi
+	membar	#LoadStore|#StoreLoad|#StoreStore
+	sub	%i0, 64, %i0
+	ldda	[%i1] %asi, %f0
+	ldda	[%i2] %asi, %f16
+
+5:	ldda	[%i3] %asi, %f32
+	fxor	%f0, %f16, %f48
+	fxor	%f2, %f18, %f50
+	add	%i1, 64, %i1
+	fxor	%f4, %f20, %f52
+	fxor	%f6, %f22, %f54
+	add	%i2, 64, %i2
+	fxor	%f8, %f24, %f56
+	fxor	%f10, %f26, %f58
+	fxor	%f12, %f28, %f60
+	fxor	%f14, %f30, %f62
+	ldda	[%i4] %asi, %f16
+	fxor	%f48, %f32, %f48
+	fxor	%f50, %f34, %f50
+	fxor	%f52, %f36, %f52
+	fxor	%f54, %f38, %f54
+	add	%i3, 64, %i3
+	fxor	%f56, %f40, %f56
+	fxor	%f58, %f42, %f58
+	fxor	%f60, %f44, %f60
+	fxor	%f62, %f46, %f62
+	ldda	[%i5] %asi, %f32
+	fxor	%f48, %f16, %f48
+	fxor	%f50, %f18, %f50
+	add	%i4, 64, %i4
+	fxor	%f52, %f20, %f52
+	fxor	%f54, %f22, %f54
+	add	%i5, 64, %i5
+	fxor	%f56, %f24, %f56
+	fxor	%f58, %f26, %f58
+	fxor	%f60, %f28, %f60
+	fxor	%f62, %f30, %f62
+	ldda	[%i1] %asi, %f0
+	fxor	%f48, %f32, %f48
+	fxor	%f50, %f34, %f50
+	fxor	%f52, %f36, %f52
+	fxor	%f54, %f38, %f54
+	fxor	%f56, %f40, %f56
+	fxor	%f58, %f42, %f58
+	subcc	%i0, 64, %i0
+	fxor	%f60, %f44, %f60
+	fxor	%f62, %f46, %f62
+	stda	%f48, [%i1 - 64] %asi
+	bne,pt	%xcc, 5b
+	 ldda	[%i2] %asi, %f16
+
+	ldda	[%i3] %asi, %f32
+	fxor	%f0, %f16, %f48
+	fxor	%f2, %f18, %f50
+	fxor	%f4, %f20, %f52
+	fxor	%f6, %f22, %f54
+	fxor	%f8, %f24, %f56
+	fxor	%f10, %f26, %f58
+	fxor	%f12, %f28, %f60
+	fxor	%f14, %f30, %f62
+	ldda	[%i4] %asi, %f16
+	fxor	%f48, %f32, %f48
+	fxor	%f50, %f34, %f50
+	fxor	%f52, %f36, %f52
+	fxor	%f54, %f38, %f54
+	fxor	%f56, %f40, %f56
+	fxor	%f58, %f42, %f58
+	fxor	%f60, %f44, %f60
+	fxor	%f62, %f46, %f62
+	ldda	[%i5] %asi, %f32
+	fxor	%f48, %f16, %f48
+	fxor	%f50, %f18, %f50
+	fxor	%f52, %f20, %f52
+	fxor	%f54, %f22, %f54
+	fxor	%f56, %f24, %f56
+	fxor	%f58, %f26, %f58
+	fxor	%f60, %f28, %f60
+	fxor	%f62, %f30, %f62
+	membar	#Sync
+	fxor	%f48, %f32, %f48
+	fxor	%f50, %f34, %f50
+	fxor	%f52, %f36, %f52
+	fxor	%f54, %f38, %f54
+	fxor	%f56, %f40, %f56
+	fxor	%f58, %f42, %f58
+	fxor	%f60, %f44, %f60
+	fxor	%f62, %f46, %f62
+	stda	%f48, [%i1] %asi
+	membar	#Sync|#StoreStore|#StoreLoad
+	wr	%g1, %g0, %asi
+	wr	%g0, 0, %fprs
+	ret
+	 restore
+	.size	xor_vis_5, .-xor_vis_5
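
For reference, the effect of xor_vis_2 in plain C is simply p1[i] ^= p2[i]
across the buffer; xor_vis_3/4/5 fold the additional sources into p1 the same
way.  The sketch below is a portable model of that contract under the
requirements stated in the file header (64-byte-aligned pointers, length a
multiple of 128 and at least 256 bytes).  It is not the routine the kernel
registers through its xor_block templates, just an aid for reading the VIS
code.

	/* Reference model only: what xor_vis_2 computes, minus the VIS
	 * block loads/stores and the FPU state handling.
	 */
	#include <assert.h>
	#include <stddef.h>
	#include <stdio.h>
	#include <stdlib.h>

	static void xor_2_reference(unsigned long bytes,
				    unsigned long *p1, const unsigned long *p2)
	{
		size_t i, words = bytes / sizeof(unsigned long);

		/* The requirements from the header above. */
		assert((((unsigned long)p1 | (unsigned long)p2) & (64 - 1)) == 0);
		assert((bytes & 127) == 0 && bytes >= 256);

		/* xor_vis_2 does this 64 bytes (one VIS block) at a time. */
		for (i = 0; i < words; i++)
			p1[i] ^= p2[i];
	}

	int main(void)
	{
		unsigned long *a = aligned_alloc(64, 256);
		unsigned long *b = aligned_alloc(64, 256);
		size_t i;

		if (!a || !b)
			return 1;
		for (i = 0; i < 256 / sizeof(unsigned long); i++) {
			a[i] = i;
			b[i] = ~0UL;
		}
		xor_2_reference(256, a, b);
		printf("a[0] = %lx\n", a[0]);	/* 0 ^ ~0UL = all bits set */
		free(a);
		free(b);
		return 0;
	}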