Linux-2.6.12-rc2

Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.

Let it rip!
diff --git a/arch/sparc/lib/COPYING.LIB b/arch/sparc/lib/COPYING.LIB
new file mode 100644
index 0000000..eb685a5
--- /dev/null
+++ b/arch/sparc/lib/COPYING.LIB
@@ -0,0 +1,481 @@
+		  GNU LIBRARY GENERAL PUBLIC LICENSE
+		       Version 2, June 1991
+
+ Copyright (C) 1991 Free Software Foundation, Inc.
+                    675 Mass Ave, Cambridge, MA 02139, USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+[This is the first released version of the library GPL.  It is
+ numbered 2 because it goes with version 2 of the ordinary GPL.]
+
+			    Preamble
+
+  The licenses for most software are designed to take away your
+freedom to share and change it.  By contrast, the GNU General Public
+Licenses are intended to guarantee your freedom to share and change
+free software--to make sure the software is free for all its users.
+
+  This license, the Library General Public License, applies to some
+specially designated Free Software Foundation software, and to any
+other libraries whose authors decide to use it.  You can use it for
+your libraries, too.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+  To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if
+you distribute copies of the library, or if you modify it.
+
+  For example, if you distribute copies of the library, whether gratis
+or for a fee, you must give the recipients all the rights that we gave
+you.  You must make sure that they, too, receive or can get the source
+code.  If you link a program with the library, you must provide
+complete object files to the recipients so that they can relink them
+with the library, after making changes to the library and recompiling
+it.  And you must show them these terms so they know their rights.
+
+  Our method of protecting your rights has two steps: (1) copyright
+the library, and (2) offer you this license which gives you legal
+permission to copy, distribute and/or modify the library.
+
+  Also, for each distributor's protection, we want to make certain
+that everyone understands that there is no warranty for this free
+library.  If the library is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original
+version, so that any problems introduced by others will not reflect on
+the original authors' reputations.
+
+  Finally, any free program is threatened constantly by software
+patents.  We wish to avoid the danger that companies distributing free
+software will individually obtain patent licenses, thus in effect
+transforming the program into proprietary software.  To prevent this,
+we have made it clear that any patent must be licensed for everyone's
+free use or not licensed at all.
+
+  Most GNU software, including some libraries, is covered by the ordinary
+GNU General Public License, which was designed for utility programs.  This
+license, the GNU Library General Public License, applies to certain
+designated libraries.  This license is quite different from the ordinary
+one; be sure to read it in full, and don't assume that anything in it is
+the same as in the ordinary license.
+
+  The reason we have a separate public license for some libraries is that
+they blur the distinction we usually make between modifying or adding to a
+program and simply using it.  Linking a program with a library, without
+changing the library, is in some sense simply using the library, and is
+analogous to running a utility program or application program.  However, in
+a textual and legal sense, the linked executable is a combined work, a
+derivative of the original library, and the ordinary General Public License
+treats it as such.
+
+  Because of this blurred distinction, using the ordinary General
+Public License for libraries did not effectively promote software
+sharing, because most developers did not use the libraries.  We
+concluded that weaker conditions might promote sharing better.
+
+  However, unrestricted linking of non-free programs would deprive the
+users of those programs of all benefit from the free status of the
+libraries themselves.  This Library General Public License is intended to
+permit developers of non-free programs to use free libraries, while
+preserving your freedom as a user of such programs to change the free
+libraries that are incorporated in them.  (We have not seen how to achieve
+this as regards changes in header files, but we have achieved it as regards
+changes in the actual functions of the Library.)  The hope is that this
+will lead to faster development of free libraries.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.  Pay close attention to the difference between a
+"work based on the library" and a "work that uses the library".  The
+former contains code derived from the library, while the latter only
+works together with the library.
+
+  Note that it is possible for a library to be covered by the ordinary
+General Public License rather than by this special one.
+
+		  GNU LIBRARY GENERAL PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. This License Agreement applies to any software library which
+contains a notice placed by the copyright holder or other authorized
+party saying it may be distributed under the terms of this Library
+General Public License (also called "this License").  Each licensee is
+addressed as "you".
+
+  A "library" means a collection of software functions and/or data
+prepared so as to be conveniently linked with application programs
+(which use some of those functions and data) to form executables.
+
+  The "Library", below, refers to any such software library or work
+which has been distributed under these terms.  A "work based on the
+Library" means either the Library or any derivative work under
+copyright law: that is to say, a work containing the Library or a
+portion of it, either verbatim or with modifications and/or translated
+straightforwardly into another language.  (Hereinafter, translation is
+included without limitation in the term "modification".)
+
+  "Source code" for a work means the preferred form of the work for
+making modifications to it.  For a library, complete source code means
+all the source code for all modules it contains, plus any associated
+interface definition files, plus the scripts used to control compilation
+and installation of the library.
+
+  Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope.  The act of
+running a program using the Library is not restricted, and output from
+such a program is covered only if its contents constitute a work based
+on the Library (independent of the use of the Library in a tool for
+writing it).  Whether that is true depends on what the Library does
+and what the program that uses the Library does.
+  
+  1. You may copy and distribute verbatim copies of the Library's
+complete source code as you receive it, in any medium, provided that
+you conspicuously and appropriately publish on each copy an
+appropriate copyright notice and disclaimer of warranty; keep intact
+all the notices that refer to this License and to the absence of any
+warranty; and distribute a copy of this License along with the
+Library.
+
+  You may charge a fee for the physical act of transferring a copy,
+and you may at your option offer warranty protection in exchange for a
+fee.
+
+  2. You may modify your copy or copies of the Library or any portion
+of it, thus forming a work based on the Library, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+    a) The modified work must itself be a software library.
+
+    b) You must cause the files modified to carry prominent notices
+    stating that you changed the files and the date of any change.
+
+    c) You must cause the whole of the work to be licensed at no
+    charge to all third parties under the terms of this License.
+
+    d) If a facility in the modified Library refers to a function or a
+    table of data to be supplied by an application program that uses
+    the facility, other than as an argument passed when the facility
+    is invoked, then you must make a good faith effort to ensure that,
+    in the event an application does not supply such function or
+    table, the facility still operates, and performs whatever part of
+    its purpose remains meaningful.
+
+    (For example, a function in a library to compute square roots has
+    a purpose that is entirely well-defined independent of the
+    application.  Therefore, Subsection 2d requires that any
+    application-supplied function or table used by this function must
+    be optional: if the application does not supply it, the square
+    root function must still compute square roots.)
+
+These requirements apply to the modified work as a whole.  If
+identifiable sections of that work are not derived from the Library,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works.  But when you
+distribute the same sections as part of a whole which is a work based
+on the Library, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote
+it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Library.
+
+In addition, mere aggregation of another work not based on the Library
+with the Library (or with a work based on the Library) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+  3. You may opt to apply the terms of the ordinary GNU General Public
+License instead of this License to a given copy of the Library.  To do
+this, you must alter all the notices that refer to this License, so
+that they refer to the ordinary GNU General Public License, version 2,
+instead of to this License.  (If a newer version than version 2 of the
+ordinary GNU General Public License has appeared, then you can specify
+that version instead if you wish.)  Do not make any other change in
+these notices.
+
+  Once this change is made in a given copy, it is irreversible for
+that copy, so the ordinary GNU General Public License applies to all
+subsequent copies and derivative works made from that copy.
+
+  This option is useful when you wish to copy part of the code of
+the Library into a program that is not a library.
+
+  4. You may copy and distribute the Library (or a portion or
+derivative of it, under Section 2) in object code or executable form
+under the terms of Sections 1 and 2 above provided that you accompany
+it with the complete corresponding machine-readable source code, which
+must be distributed under the terms of Sections 1 and 2 above on a
+medium customarily used for software interchange.
+
+  If distribution of object code is made by offering access to copy
+from a designated place, then offering equivalent access to copy the
+source code from the same place satisfies the requirement to
+distribute the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+  5. A program that contains no derivative of any portion of the
+Library, but is designed to work with the Library by being compiled or
+linked with it, is called a "work that uses the Library".  Such a
+work, in isolation, is not a derivative work of the Library, and
+therefore falls outside the scope of this License.
+
+  However, linking a "work that uses the Library" with the Library
+creates an executable that is a derivative of the Library (because it
+contains portions of the Library), rather than a "work that uses the
+library".  The executable is therefore covered by this License.
+Section 6 states terms for distribution of such executables.
+
+  When a "work that uses the Library" uses material from a header file
+that is part of the Library, the object code for the work may be a
+derivative work of the Library even though the source code is not.
+Whether this is true is especially significant if the work can be
+linked without the Library, or if the work is itself a library.  The
+threshold for this to be true is not precisely defined by law.
+
+  If such an object file uses only numerical parameters, data
+structure layouts and accessors, and small macros and small inline
+functions (ten lines or less in length), then the use of the object
+file is unrestricted, regardless of whether it is legally a derivative
+work.  (Executables containing this object code plus portions of the
+Library will still fall under Section 6.)
+
+  Otherwise, if the work is a derivative of the Library, you may
+distribute the object code for the work under the terms of Section 6.
+Any executables containing that work also fall under Section 6,
+whether or not they are linked directly with the Library itself.
+
+  6. As an exception to the Sections above, you may also compile or
+link a "work that uses the Library" with the Library to produce a
+work containing portions of the Library, and distribute that work
+under terms of your choice, provided that the terms permit
+modification of the work for the customer's own use and reverse
+engineering for debugging such modifications.
+
+  You must give prominent notice with each copy of the work that the
+Library is used in it and that the Library and its use are covered by
+this License.  You must supply a copy of this License.  If the work
+during execution displays copyright notices, you must include the
+copyright notice for the Library among them, as well as a reference
+directing the user to the copy of this License.  Also, you must do one
+of these things:
+
+    a) Accompany the work with the complete corresponding
+    machine-readable source code for the Library including whatever
+    changes were used in the work (which must be distributed under
+    Sections 1 and 2 above); and, if the work is an executable linked
+    with the Library, with the complete machine-readable "work that
+    uses the Library", as object code and/or source code, so that the
+    user can modify the Library and then relink to produce a modified
+    executable containing the modified Library.  (It is understood
+    that the user who changes the contents of definitions files in the
+    Library will not necessarily be able to recompile the application
+    to use the modified definitions.)
+
+    b) Accompany the work with a written offer, valid for at
+    least three years, to give the same user the materials
+    specified in Subsection 6a, above, for a charge no more
+    than the cost of performing this distribution.
+
+    c) If distribution of the work is made by offering access to copy
+    from a designated place, offer equivalent access to copy the above
+    specified materials from the same place.
+
+    d) Verify that the user has already received a copy of these
+    materials or that you have already sent this user a copy.
+
+  For an executable, the required form of the "work that uses the
+Library" must include any data and utility programs needed for
+reproducing the executable from it.  However, as a special exception,
+the source code distributed need not include anything that is normally
+distributed (in either source or binary form) with the major
+components (compiler, kernel, and so on) of the operating system on
+which the executable runs, unless that component itself accompanies
+the executable.
+
+  It may happen that this requirement contradicts the license
+restrictions of other proprietary libraries that do not normally
+accompany the operating system.  Such a contradiction means you cannot
+use both them and the Library together in an executable that you
+distribute.
+
+  7. You may place library facilities that are a work based on the
+Library side-by-side in a single library together with other library
+facilities not covered by this License, and distribute such a combined
+library, provided that the separate distribution of the work based on
+the Library and of the other library facilities is otherwise
+permitted, and provided that you do these two things:
+
+    a) Accompany the combined library with a copy of the same work
+    based on the Library, uncombined with any other library
+    facilities.  This must be distributed under the terms of the
+    Sections above.
+
+    b) Give prominent notice with the combined library of the fact
+    that part of it is a work based on the Library, and explaining
+    where to find the accompanying uncombined form of the same work.
+
+  8. You may not copy, modify, sublicense, link with, or distribute
+the Library except as expressly provided under this License.  Any
+attempt otherwise to copy, modify, sublicense, link with, or
+distribute the Library is void, and will automatically terminate your
+rights under this License.  However, parties who have received copies,
+or rights, from you under this License will not have their licenses
+terminated so long as such parties remain in full compliance.
+
+  9. You are not required to accept this License, since you have not
+signed it.  However, nothing else grants you permission to modify or
+distribute the Library or its derivative works.  These actions are
+prohibited by law if you do not accept this License.  Therefore, by
+modifying or distributing the Library (or any work based on the
+Library), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Library or works based on it.
+
+  10. Each time you redistribute the Library (or any work based on the
+Library), the recipient automatically receives a license from the
+original licensor to copy, distribute, link with or modify the Library
+subject to these terms and conditions.  You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+  11. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Library at all.  For example, if a patent
+license would not permit royalty-free redistribution of the Library by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Library.
+
+If any portion of this section is held invalid or unenforceable under any
+particular circumstance, the balance of the section is intended to apply,
+and the section as a whole is intended to apply in other circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system which is
+implemented by public license practices.  Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+  12. If the distribution and/or use of the Library is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Library under this License may add
+an explicit geographical distribution limitation excluding those countries,
+so that distribution is permitted only in or among countries not thus
+excluded.  In such case, this License incorporates the limitation as if
+written in the body of this License.
+
+  13. The Free Software Foundation may publish revised and/or new
+versions of the Library General Public License from time to time.
+Such new versions will be similar in spirit to the present version,
+but may differ in detail to address new problems or concerns.
+
+Each version is given a distinguishing version number.  If the Library
+specifies a version number of this License which applies to it and
+"any later version", you have the option of following the terms and
+conditions either of that version or of any later version published by
+the Free Software Foundation.  If the Library does not specify a
+license version number, you may choose any version ever published by
+the Free Software Foundation.
+
+  14. If you wish to incorporate parts of the Library into other free
+programs whose distribution conditions are incompatible with these,
+write to the author to ask for permission.  For software which is
+copyrighted by the Free Software Foundation, write to the Free
+Software Foundation; we sometimes make exceptions for this.  Our
+decision will be guided by the two goals of preserving the free status
+of all derivatives of our free software and of promoting the sharing
+and reuse of software generally.
+
+			    NO WARRANTY
+
+  15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
+WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
+EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
+OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
+KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
+LIBRARY IS WITH YOU.  SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
+THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+  16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
+WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
+AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
+FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
+CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
+LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
+RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
+FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
+SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGES.
+
+		     END OF TERMS AND CONDITIONS
+
+     Appendix: How to Apply These Terms to Your New Libraries
+
+  If you develop a new library, and you want it to be of the greatest
+possible use to the public, we recommend making it free software that
+everyone can redistribute and change.  You can do so by permitting
+redistribution under these terms (or, alternatively, under the terms of the
+ordinary General Public License).
+
+  To apply these terms, attach the following notices to the library.  It is
+safest to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least the
+"copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the library's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This library is free software; you can redistribute it and/or
+    modify it under the terms of the GNU Library General Public
+    License as published by the Free Software Foundation; either
+    version 2 of the License, or (at your option) any later version.
+
+    This library is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+    Library General Public License for more details.
+
+    You should have received a copy of the GNU Library General Public
+    License along with this library; if not, write to the Free
+    Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+Also add information on how to contact you by electronic and paper mail.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the library, if
+necessary.  Here is a sample; alter the names:
+
+  Yoyodyne, Inc., hereby disclaims all copyright interest in the
+  library `Frob' (a library for tweaking knobs) written by James Random Hacker.
+
+  <signature of Ty Coon>, 1 April 1990
+  Ty Coon, President of Vice
+
+That's all there is to it!
diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
new file mode 100644
index 0000000..2296ff9
--- /dev/null
+++ b/arch/sparc/lib/Makefile
@@ -0,0 +1,13 @@
+# $Id: Makefile,v 1.35 2000/12/15 00:41:18 davem Exp $
+# Makefile for Sparc library files.
+#
+
+EXTRA_AFLAGS := -ansi -DST_DIV0=0x02
+
+lib-y := mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o memcpy.o memset.o \
+         strlen.o checksum.o blockops.o memscan.o memcmp.o strncmp.o \
+	 strncpy_from_user.o divdi3.o udivdi3.o strlen_user.o \
+	 copy_user.o locks.o atomic.o atomic32.o bitops.o \
+	 lshrdi3.o ashldi3.o rwsem.o muldi3.o bitext.o
+
+lib-$(CONFIG_DEBUG_SPINLOCK) +=	debuglocks.o
diff --git a/arch/sparc/lib/ashldi3.S b/arch/sparc/lib/ashldi3.S
new file mode 100644
index 0000000..52418a0
--- /dev/null
+++ b/arch/sparc/lib/ashldi3.S
@@ -0,0 +1,34 @@
+/* $Id: ashldi3.S,v 1.2 1999/11/19 04:11:46 davem Exp $
+ * ashldi3.S:	GCC emits these for certain drivers playing
+ *		with long longs.
+ *
+ * Copyright (C) 1999 David S. Miller (davem@redhat.com)
+ */
+
+	.text
+	.align	4
+	.globl	__ashldi3
+__ashldi3:
+	cmp	%o2, 0
+	be	9f
+	 mov	0x20, %g2
+
+	sub	%g2, %o2, %g2
+	cmp	%g2, 0
+	bg	7f
+	 sll	%o0, %o2, %g3
+
+	neg	%g2
+	clr	%o5
+	b	8f
+	 sll	%o1, %g2, %o4
+7:
+	srl	%o1, %g2, %g2
+	sll	%o1, %o2, %o5
+	or	%g3, %g2, %o4
+8:
+	mov	%o4, %o0
+	mov	%o5, %o1
+9:
+	retl
+	 nop
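
For readers who do not speak SPARC assembly: __ashldi3 is the helper gcc calls for a 64-bit left shift on this 32-bit target, with the more significant word of the value in %o0, the less significant word in %o1 and the shift count in %o2, as the code above implies. A minimal C sketch of the same computation (purely illustrative; the function name is invented):

    #include <stdint.h>

    /* Illustrative equivalent of __ashldi3: shift the 64-bit value (hi:lo)
     * left by count bits, 0 <= count < 64. */
    static uint64_t ashldi3_sketch(uint32_t hi, uint32_t lo, unsigned int count)
    {
            if (count == 0)
                    return ((uint64_t)hi << 32) | lo;
            if (count >= 32)        /* low word shifts entirely into the high word */
                    return (uint64_t)(lo << (count - 32)) << 32;
            hi = (hi << count) | (lo >> (32 - count));      /* carry bits across */
            lo <<= count;
            return ((uint64_t)hi << 32) | lo;
    }

The __ashrdi3 routine that follows is the mirror image for arithmetic right shifts, with the sign bit smeared in from the top instead of zeroes shifted in from the bottom.
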
diff --git a/arch/sparc/lib/ashrdi3.S b/arch/sparc/lib/ashrdi3.S
new file mode 100644
index 0000000..2848237
--- /dev/null
+++ b/arch/sparc/lib/ashrdi3.S
@@ -0,0 +1,36 @@
+/* $Id: ashrdi3.S,v 1.4 1999/11/19 04:11:49 davem Exp $
+ * ashrdi3.S:	The filesystem code creates all kinds of references to
+ *              this little routine on the sparc with gcc.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+	.text
+	.align	4
+	.globl __ashrdi3
+__ashrdi3:
+	tst	%o2
+	be	3f
+	 or	%g0, 32, %g2
+
+	sub	%g2, %o2, %g2
+
+	tst	%g2
+	bg	1f
+	 sra	%o0, %o2, %o4
+
+	sra	%o0, 31, %o4
+	sub	%g0, %g2, %g2
+	ba	2f
+	 sra	%o0, %g2, %o5
+
+1:
+	sll	%o0, %g2, %g3
+	srl	%o1, %o2, %g2
+	or	%g2, %g3, %o5
+2:
+	or	%g0, %o4, %o0
+	or	%g0, %o5, %o1
+3:
+	jmpl	%o7 + 8, %g0
+	 nop
diff --git a/arch/sparc/lib/atomic.S b/arch/sparc/lib/atomic.S
new file mode 100644
index 0000000..f48ad0c
--- /dev/null
+++ b/arch/sparc/lib/atomic.S
@@ -0,0 +1,100 @@
+/* atomic.S: Move this stuff here for better ICACHE hit rates.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caipfs.rutgers.edu)
+ */
+
+#include <linux/config.h>
+#include <asm/ptrace.h>
+#include <asm/psr.h>
+
+	.text
+	.align	4
+
+	.globl  __atomic_begin
+__atomic_begin:
+
+#ifndef CONFIG_SMP
+	.globl	___xchg32_sun4c
+___xchg32_sun4c:
+	rd	%psr, %g3
+	andcc	%g3, PSR_PIL, %g0
+	bne	1f
+	 nop
+	wr	%g3, PSR_PIL, %psr
+	nop; nop; nop
+1:
+	andcc	%g3, PSR_PIL, %g0
+	ld	[%g1], %g7
+	bne	1f
+	 st	%g2, [%g1]
+	wr	%g3, 0x0, %psr
+	nop; nop; nop
+1:
+	mov	%g7, %g2
+	jmpl	%o7 + 8, %g0
+	 mov	%g4, %o7
+
+	.globl	___xchg32_sun4md
+___xchg32_sun4md:
+	swap	[%g1], %g2
+	jmpl	%o7 + 8, %g0
+	 mov	%g4, %o7
+#endif
+
+	/* Read asm-sparc/atomic.h carefully to understand how this works for SMP.
+	 * Really, some things here for SMP are overly clever, go read the header.
+	 */
+	.globl	___atomic24_add
+___atomic24_add:
+	rd	%psr, %g3		! Keep the code small, old way was stupid
+	nop; nop; nop;			! Let the bits set
+	or	%g3, PSR_PIL, %g7	! Disable interrupts
+	wr	%g7, 0x0, %psr		! Set %psr
+	nop; nop; nop;			! Let the bits set
+#ifdef CONFIG_SMP
+1:	ldstub	[%g1 + 3], %g7		! Spin on the byte lock for SMP.
+	orcc	%g7, 0x0, %g0		! Did we get it?
+	bne	1b			! Nope...
+	 ld	[%g1], %g7		! Load locked atomic24_t
+	sra	%g7, 8, %g7		! Get signed 24-bit integer
+	add	%g7, %g2, %g2		! Add in argument
+	sll	%g2, 8, %g7		! Transpose back to atomic24_t
+	st	%g7, [%g1]		! Clever: This releases the lock as well.
+#else
+	ld	[%g1], %g7		! Load locked atomic24_t
+	add	%g7, %g2, %g2		! Add in argument
+	st	%g2, [%g1]		! Store it back
+#endif
+	wr	%g3, 0x0, %psr		! Restore original PSR_PIL
+	nop; nop; nop;			! Let the bits set
+	jmpl	%o7, %g0		! NOTE: not + 8, see callers in atomic.h
+	 mov	%g4, %o7		! Restore %o7
+
+	.globl	___atomic24_sub
+___atomic24_sub:
+	rd	%psr, %g3		! Keep the code small, old way was stupid
+	nop; nop; nop;			! Let the bits set
+	or	%g3, PSR_PIL, %g7	! Disable interrupts
+	wr	%g7, 0x0, %psr		! Set %psr
+	nop; nop; nop;			! Let the bits set
+#ifdef CONFIG_SMP
+1:	ldstub	[%g1 + 3], %g7		! Spin on the byte lock for SMP.
+	orcc	%g7, 0x0, %g0		! Did we get it?
+	bne	1b			! Nope...
+	 ld	[%g1], %g7		! Load locked atomic24_t
+	sra	%g7, 8, %g7		! Get signed 24-bit integer
+	sub	%g7, %g2, %g2		! Subtract argument
+	sll	%g2, 8, %g7		! Transpose back to atomic24_t
+	st	%g7, [%g1]		! Clever: This releases the lock as well
+#else
+	ld	[%g1], %g7		! Load locked atomic24_t
+	sub	%g7, %g2, %g2		! Subtract argument
+	st	%g2, [%g1]		! Store it back
+#endif
+	wr	%g3, 0x0, %psr		! Restore original PSR_PIL
+	nop; nop; nop;			! Let the bits set
+	jmpl	%o7, %g0		! NOTE: not + 8, see callers in atomic.h
+	 mov	%g4, %o7		! Restore %o7
+
+	.globl  __atomic_end
+__atomic_end:
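
The ___atomic24_add/___atomic24_sub pair above relies on a packed representation: the signed 24-bit counter sits in the upper 24 bits of the word, and on SMP the low byte (at offset 3 on this big-endian machine) doubles as a byte lock taken with ldstub, so storing the re-shifted value updates the counter and drops the lock in a single instruction. A small C sketch of just that packing arithmetic (the helper names are invented; the real interface lives in asm-sparc/atomic.h):

    #include <stdint.h>

    /* Sketch of the atomic24 encoding: value in bits 31..8, lock byte in bits 7..0. */
    static int32_t atomic24_unpack(uint32_t word)
    {
            return (int32_t)word >> 8;      /* arithmetic shift keeps the sign */
    }

    static uint32_t atomic24_pack(int32_t value)
    {
            return (uint32_t)value << 8;    /* low (lock) byte becomes zero */
    }

    static uint32_t atomic24_add_sketch(uint32_t word, int32_t delta)
    {
            /* this is what the assembly does between taking and dropping the lock */
            return atomic24_pack(atomic24_unpack(word) + delta);
    }
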
diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c
new file mode 100644
index 0000000..19724c5
--- /dev/null
+++ b/arch/sparc/lib/atomic32.c
@@ -0,0 +1,53 @@
+/*
+ * atomic32.c: 32-bit atomic_t implementation
+ *
+ * Copyright (C) 2004 Keith M Wesolowski
+ * 
+ * Based on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf
+ */
+
+#include <asm/atomic.h>
+#include <linux/spinlock.h>
+#include <linux/module.h>
+
+#ifdef CONFIG_SMP
+#define ATOMIC_HASH_SIZE	4
+#define ATOMIC_HASH(a)	(&__atomic_hash[(((unsigned long)a)>>8) & (ATOMIC_HASH_SIZE-1)])
+
+spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] = {
+	[0 ... (ATOMIC_HASH_SIZE-1)] = SPIN_LOCK_UNLOCKED
+};
+
+#else /* SMP */
+
+static spinlock_t dummy = SPIN_LOCK_UNLOCKED;
+#define ATOMIC_HASH_SIZE	1
+#define ATOMIC_HASH(a)		(&dummy)
+
+#endif /* SMP */
+
+int __atomic_add_return(int i, atomic_t *v)
+{
+	int ret;
+	unsigned long flags;
+	spin_lock_irqsave(ATOMIC_HASH(v), flags);
+
+	ret = (v->counter += i);
+
+	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
+	return ret;
+}
+
+void atomic_set(atomic_t *v, int i)
+{
+	unsigned long flags;
+	spin_lock_irqsave(ATOMIC_HASH(v), flags);
+
+	v->counter = i;
+
+	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
+}
+
+EXPORT_SYMBOL(__atomic_add_return);
+EXPORT_SYMBOL(atomic_set);
+
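Only __atomic_add_return() and atomic_set() are defined here; the rest of the 32-bit atomic API is expected to be layered on top of them. A hedged sketch of how such wrappers could look (the my_* names are invented for illustration and are not the real asm-sparc definitions):

    #include <asm/atomic.h>

    /* Hypothetical wrappers over __atomic_add_return(): every operation
     * reduces to one hashed-spinlock-protected read-modify-write. */
    #define my_atomic_add(i, v)           ((void)__atomic_add_return((i), (v)))
    #define my_atomic_sub(i, v)           ((void)__atomic_add_return(-(i), (v)))
    #define my_atomic_inc(v)              ((void)__atomic_add_return(1, (v)))
    #define my_atomic_dec_and_test(v)     (__atomic_add_return(-1, (v)) == 0)

Hashing the spinlock on the atomic_t's address (on SMP) keeps unrelated counters from serialising on one global lock while still needing only ATOMIC_HASH_SIZE locks in total.
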
diff --git a/arch/sparc/lib/bitext.c b/arch/sparc/lib/bitext.c
new file mode 100644
index 0000000..94b05e8
--- /dev/null
+++ b/arch/sparc/lib/bitext.c
@@ -0,0 +1,132 @@
+/*
+ * bitext.c: kernel little helper (of bit shuffling variety).
+ *
+ * Copyright (C) 2002 Pete Zaitcev <zaitcev@yahoo.com>
+ *
+ * The algorithm to search a zero bit string is geared towards its application.
+ * We expect a couple of fixed sizes of requests, so a rotating counter, reset
+ * by align size, should provide fast enough search while maintaining low
+ * fragmentation.
+ */
+
+#include <linux/smp_lock.h>
+#include <linux/bitops.h>
+
+#include <asm/bitext.h>
+
+/**
+ * bit_map_string_get - find and set a bit string in bit map.
+ * @t: the bit map.
+ * @len: requested string length
+ * @align: requested alignment
+ *
+ * Returns offset in the map or -1 if out of space.
+ *
+ * Not safe to call from an interrupt (uses spin_lock).
+ */
+int bit_map_string_get(struct bit_map *t, int len, int align)
+{
+	int offset, count;	/* siamese twins */
+	int off_new;
+	int align1;
+	int i, color;
+
+	if (t->num_colors) {
+		/* align is overloaded to be the page color */
+		color = align;
+		align = t->num_colors;
+	} else {
+		color = 0;
+		if (align == 0)
+			align = 1;
+	}
+	align1 = align - 1;
+	if ((align & align1) != 0)
+		BUG();
+	if (align < 0 || align >= t->size)
+		BUG();
+	if (len <= 0 || len > t->size)
+		BUG();
+	color &= align1;
+
+	spin_lock(&t->lock);
+	if (len < t->last_size)
+		offset = t->first_free;
+	else
+		offset = t->last_off & ~align1;
+	count = 0;
+	for (;;) {
+		off_new = find_next_zero_bit(t->map, t->size, offset);
+		off_new = ((off_new + align1) & ~align1) + color;
+		count += off_new - offset;
+		offset = off_new;
+		if (offset >= t->size)
+			offset = 0;
+		if (count + len > t->size) {
+			spin_unlock(&t->lock);
+/* P3 */ printk(KERN_ERR
+  "bitmap out: size %d used %d off %d len %d align %d count %d\n",
+  t->size, t->used, offset, len, align, count);
+			return -1;
+		}
+
+		if (offset + len > t->size) {
+			count += t->size - offset;
+			offset = 0;
+			continue;
+		}
+
+		i = 0;
+		while (test_bit(offset + i, t->map) == 0) {
+			i++;
+			if (i == len) {
+				for (i = 0; i < len; i++)
+					__set_bit(offset + i, t->map);
+				if (offset == t->first_free)
+					t->first_free = find_next_zero_bit
+							(t->map, t->size,
+							 t->first_free + len);
+				if ((t->last_off = offset + len) >= t->size)
+					t->last_off = 0;
+				t->used += len;
+				t->last_size = len;
+				spin_unlock(&t->lock);
+				return offset;
+			}
+		}
+		count += i + 1;
+		if ((offset += i + 1) >= t->size)
+			offset = 0;
+	}
+}
+
+void bit_map_clear(struct bit_map *t, int offset, int len)
+{
+	int i;
+
+	if (t->used < len)
+		BUG();		/* Much too late to do any good, but alas... */
+	spin_lock(&t->lock);
+	for (i = 0; i < len; i++) {
+		if (test_bit(offset + i, t->map) == 0)
+			BUG();
+		__clear_bit(offset + i, t->map);
+	}
+	if (offset < t->first_free)
+		t->first_free = offset;
+	t->used -= len;
+	spin_unlock(&t->lock);
+}
+
+void bit_map_init(struct bit_map *t, unsigned long *map, int size)
+{
+
+	if ((size & 07) != 0)
+		BUG();
+	memset(map, 0, size>>3);
+
+	memset(t, 0, sizeof *t);
+	spin_lock_init(&t->lock);
+	t->map = map;
+	t->size = size;
+}
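
A short usage sketch of the allocator above: the backing bitmap must be a whole number of bytes (hence the size & 07 check), bit_map_string_get() returns the offset of a contiguous, aligned run of bits, and bit_map_clear() releases it. The names and sizes below are invented for illustration; note that when t->num_colors is non-zero the align argument is instead interpreted as the requested page colour.

    #include <asm/bitext.h>

    static unsigned long example_storage[32];       /* 1024 bits on 32-bit sparc */
    static struct bit_map example_map;

    static int example_alloc(void)
    {
            int off;

            bit_map_init(&example_map, example_storage, 1024);

            /* request 16 consecutive bits, aligned to a 16-bit boundary */
            off = bit_map_string_get(&example_map, 16, 16);
            if (off == -1)
                    return -1;              /* map exhausted */

            /* ... bits [off, off + 16) now belong to this caller ... */

            bit_map_clear(&example_map, off, 16);
            return 0;
    }
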
diff --git a/arch/sparc/lib/bitops.S b/arch/sparc/lib/bitops.S
new file mode 100644
index 0000000..3e93997
--- /dev/null
+++ b/arch/sparc/lib/bitops.S
@@ -0,0 +1,110 @@
+/* bitops.S: Low level assembler bit operations.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <linux/config.h>
+#include <asm/ptrace.h>
+#include <asm/psr.h>
+
+	.text
+	.align	4
+
+	.globl  __bitops_begin
+__bitops_begin:
+
+	/* Take bits in %g2 and set them in word at %g1,
+	 * return whether bits were set in original value
+	 * in %g2.  %g4 holds value to restore into %o7
+	 * in delay slot of jmpl return, %g3 + %g5 + %g7 can be
+	 * used as temporaries and thus is considered clobbered
+	 * by all callers.
+	 */
+	.globl	___set_bit
+___set_bit:
+	rd	%psr, %g3
+	nop; nop; nop;
+	or	%g3, PSR_PIL, %g5
+	wr	%g5, 0x0, %psr
+	nop; nop; nop
+#ifdef CONFIG_SMP
+	set	bitops_spinlock, %g5
+2:	ldstub	[%g5], %g7		! Spin on the byte lock for SMP.
+	orcc	%g7, 0x0, %g0		! Did we get it?
+	bne	2b			! Nope...
+#endif
+	 ld	[%g1], %g7
+	or	%g7, %g2, %g5
+	and	%g7, %g2, %g2
+#ifdef CONFIG_SMP
+	st	%g5, [%g1]
+	set	bitops_spinlock, %g5
+	stb	%g0, [%g5]
+#else
+	st	%g5, [%g1]
+#endif
+	wr	%g3, 0x0, %psr
+	nop; nop; nop
+	jmpl	%o7, %g0
+	 mov	%g4, %o7
+
+	/* Same as above, but clears the bits from %g2 instead. */
+	.globl	___clear_bit
+___clear_bit:
+	rd	%psr, %g3
+	nop; nop; nop
+	or	%g3, PSR_PIL, %g5
+	wr	%g5, 0x0, %psr
+	nop; nop; nop
+#ifdef CONFIG_SMP
+	set	bitops_spinlock, %g5
+2:	ldstub	[%g5], %g7		! Spin on the byte lock for SMP.
+	orcc	%g7, 0x0, %g0		! Did we get it?
+	bne	2b			! Nope...
+#endif
+	 ld	[%g1], %g7
+	andn	%g7, %g2, %g5
+	and	%g7, %g2, %g2
+#ifdef CONFIG_SMP
+	st	%g5, [%g1]
+	set	bitops_spinlock, %g5
+	stb	%g0, [%g5]
+#else
+	st	%g5, [%g1]
+#endif
+	wr	%g3, 0x0, %psr
+	nop; nop; nop
+	jmpl	%o7, %g0
+	 mov	%g4, %o7
+
+	/* Same thing again, but this time toggles the bits from %g2. */
+	.globl	___change_bit
+___change_bit:
+	rd	%psr, %g3
+	nop; nop; nop
+	or	%g3, PSR_PIL, %g5
+	wr	%g5, 0x0, %psr
+	nop; nop; nop
+#ifdef CONFIG_SMP
+	set	bitops_spinlock, %g5
+2:	ldstub	[%g5], %g7		! Spin on the byte lock for SMP.
+	orcc	%g7, 0x0, %g0		! Did we get it?
+	bne	2b			! Nope...
+#endif
+	 ld	[%g1], %g7
+	xor	%g7, %g2, %g5
+	and	%g7, %g2, %g2
+#ifdef CONFIG_SMP
+	st	%g5, [%g1]
+	set	bitops_spinlock, %g5
+	stb	%g0, [%g5]
+#else
+	st	%g5, [%g1]
+#endif
+	wr	%g3, 0x0, %psr
+	nop; nop; nop
+	jmpl	%o7, %g0
+	 mov	%g4, %o7
+
+	.globl  __bitops_end
+__bitops_end:
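
All three routines follow the calling convention described in the comment above ___set_bit: %g1 points at the word, %g2 carries a mask of the bits to operate on, and %g2 comes back holding the previous state of exactly those bits. Stripped of the interrupt masking and the SMP byte lock, the contract amounts to this C sketch (illustrative only):

    /* What ___set_bit computes once it holds the word exclusively. */
    static unsigned long set_bits_sketch(unsigned long *word, unsigned long mask)
    {
            unsigned long old = *word;

            *word = old | mask;     /* ___clear_bit stores old & ~mask,  */
            return old & mask;      /* ___change_bit stores old ^ mask   */
    }
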
diff --git a/arch/sparc/lib/blockops.S b/arch/sparc/lib/blockops.S
new file mode 100644
index 0000000..a7c7ffa
--- /dev/null
+++ b/arch/sparc/lib/blockops.S
@@ -0,0 +1,89 @@
+/* $Id: blockops.S,v 1.8 1998/01/30 10:58:44 jj Exp $
+ * blockops.S: Common block zero optimized routines.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <asm/page.h>
+
+	/* Zero out 64 bytes of memory at (buf + offset).
+	 * Assumes %g1 contains zero.
+	 */
+#define BLAST_BLOCK(buf, offset) \
+	std	%g0, [buf + offset + 0x38]; \
+	std	%g0, [buf + offset + 0x30]; \
+	std	%g0, [buf + offset + 0x28]; \
+	std	%g0, [buf + offset + 0x20]; \
+	std	%g0, [buf + offset + 0x18]; \
+	std	%g0, [buf + offset + 0x10]; \
+	std	%g0, [buf + offset + 0x08]; \
+	std	%g0, [buf + offset + 0x00];
+
+	/* Copy 32 bytes of memory at (src + offset) to
+	 * (dst + offset).
+	 */
+#define MIRROR_BLOCK(dst, src, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
+	ldd	[src + offset + 0x18], t0; \
+	ldd	[src + offset + 0x10], t2; \
+	ldd	[src + offset + 0x08], t4; \
+	ldd	[src + offset + 0x00], t6; \
+	std	t0, [dst + offset + 0x18]; \
+	std	t2, [dst + offset + 0x10]; \
+	std	t4, [dst + offset + 0x08]; \
+	std	t6, [dst + offset + 0x00];
+
+	/* Profiling evidence indicates that memset() is
+	 * commonly called for blocks of size PAGE_SIZE,
+	 * and (2 * PAGE_SIZE) (for kernel stacks)
+	 * and with a second arg of zero.  We assume in
+	 * all of these cases that the buffer is aligned
+	 * on at least an 8 byte boundary.
+	 *
+	 * Therefore we special case them to make them
+	 * as fast as possible.
+	 */
+
+	.text
+	.align	4
+	.globl	bzero_1page, __copy_1page
+
+bzero_1page:
+/* NOTE: If you change the number of insns of this routine, please check
+ * arch/sparc/mm/hypersparc.S */
+	/* %o0 = buf */
+	or	%g0, %g0, %g1
+	or	%o0, %g0, %o1
+	or	%g0, (PAGE_SIZE >> 8), %g2
+1:
+	BLAST_BLOCK(%o0, 0x00)
+	BLAST_BLOCK(%o0, 0x40)
+	BLAST_BLOCK(%o0, 0x80)
+	BLAST_BLOCK(%o0, 0xc0)
+	subcc	%g2, 1, %g2
+	bne	1b
+	 add	%o0, 0x100, %o0
+
+	retl
+	 nop
+
+__copy_1page:
+/* NOTE: If you change the number of insns of this routine, please check
+ * arch/sparc/mm/hypersparc.S */
+	/* %o0 = dst, %o1 = src */
+	or	%g0, (PAGE_SIZE >> 8), %g1
+1:
+	MIRROR_BLOCK(%o0, %o1, 0x00, %o2, %o3, %o4, %o5, %g2, %g3, %g4, %g5)
+	MIRROR_BLOCK(%o0, %o1, 0x20, %o2, %o3, %o4, %o5, %g2, %g3, %g4, %g5)
+	MIRROR_BLOCK(%o0, %o1, 0x40, %o2, %o3, %o4, %o5, %g2, %g3, %g4, %g5)
+	MIRROR_BLOCK(%o0, %o1, 0x60, %o2, %o3, %o4, %o5, %g2, %g3, %g4, %g5)
+	MIRROR_BLOCK(%o0, %o1, 0x80, %o2, %o3, %o4, %o5, %g2, %g3, %g4, %g5)
+	MIRROR_BLOCK(%o0, %o1, 0xa0, %o2, %o3, %o4, %o5, %g2, %g3, %g4, %g5)
+	MIRROR_BLOCK(%o0, %o1, 0xc0, %o2, %o3, %o4, %o5, %g2, %g3, %g4, %g5)
+	MIRROR_BLOCK(%o0, %o1, 0xe0, %o2, %o3, %o4, %o5, %g2, %g3, %g4, %g5)
+	subcc	%g1, 1, %g1
+	add	%o0, 0x100, %o0
+	bne	1b
+	 add	%o1, 0x100, %o1
+
+	retl
+	 nop
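
Both routines walk the page in 256-byte strides: bzero_1page clears four 64-byte BLAST_BLOCK groups per iteration and __copy_1page moves eight 32-byte MIRROR_BLOCK groups. Structurally that is the loop below, a C sketch assuming PAGE_SIZE from asm/page.h; the real code uses 8-byte std/ldd pairs rather than library calls:

    #include <linux/string.h>
    #include <asm/page.h>

    /* Structural sketch of bzero_1page: PAGE_SIZE >> 8 iterations,
     * 256 bytes cleared per iteration. */
    static void bzero_1page_sketch(void *buf)
    {
            char *p = buf;
            int i;

            for (i = 0; i < (PAGE_SIZE >> 8); i++, p += 256)
                    memset(p, 0, 256);
    }
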
diff --git a/arch/sparc/lib/checksum.S b/arch/sparc/lib/checksum.S
new file mode 100644
index 0000000..77f2285
--- /dev/null
+++ b/arch/sparc/lib/checksum.S
@@ -0,0 +1,583 @@
+/* checksum.S: Sparc optimized checksum code.
+ *
+ *  Copyright(C) 1995 Linus Torvalds
+ *  Copyright(C) 1995 Miguel de Icaza
+ *  Copyright(C) 1996 David S. Miller
+ *  Copyright(C) 1997 Jakub Jelinek
+ *
+ * derived from:
+ *	Linux/Alpha checksum c-code
+ *      Linux/ix86 inline checksum assembly
+ *      RFC1071 Computing the Internet Checksum (esp. Jacobson's m68k code)
+ *	David Mosberger-Tang for optimized reference c-code
+ *	BSD4.4 portable checksum routine
+ */
+
+#include <asm/errno.h>
+
+#define CSUM_BIGCHUNK(buf, offset, sum, t0, t1, t2, t3, t4, t5)	\
+	ldd	[buf + offset + 0x00], t0;			\
+	ldd	[buf + offset + 0x08], t2;			\
+	addxcc	t0, sum, sum;					\
+	addxcc	t1, sum, sum;					\
+	ldd	[buf + offset + 0x10], t4;			\
+	addxcc	t2, sum, sum;					\
+	addxcc	t3, sum, sum;					\
+	ldd	[buf + offset + 0x18], t0;			\
+	addxcc	t4, sum, sum;					\
+	addxcc	t5, sum, sum;					\
+	addxcc	t0, sum, sum;					\
+	addxcc	t1, sum, sum;
+
+#define CSUM_LASTCHUNK(buf, offset, sum, t0, t1, t2, t3)	\
+	ldd	[buf - offset - 0x08], t0;			\
+	ldd	[buf - offset - 0x00], t2;			\
+	addxcc	t0, sum, sum;					\
+	addxcc	t1, sum, sum;					\
+	addxcc	t2, sum, sum;					\
+	addxcc	t3, sum, sum;
+
+	/* Do end cruft out of band to get better cache patterns. */
+csum_partial_end_cruft:
+	be	1f				! caller asks %o1 & 0x8
+	 andcc	%o1, 4, %g0			! nope, check for word remaining
+	ldd	[%o0], %g2			! load two
+	addcc	%g2, %o2, %o2			! add first word to sum
+	addxcc	%g3, %o2, %o2			! add second word as well
+	add	%o0, 8, %o0			! advance buf ptr
+	addx	%g0, %o2, %o2			! add in final carry
+	andcc	%o1, 4, %g0			! check again for word remaining
+1:	be	1f				! nope, skip this code
+	 andcc	%o1, 3, %o1			! check for trailing bytes
+	ld	[%o0], %g2			! load it
+	addcc	%g2, %o2, %o2			! add to sum
+	add	%o0, 4, %o0			! advance buf ptr
+	addx	%g0, %o2, %o2			! add in final carry
+	andcc	%o1, 3, %g0			! check again for trailing bytes
+1:	be	1f				! no trailing bytes, return
+	 addcc	%o1, -1, %g0			! only one byte remains?
+	bne	2f				! at least two bytes more
+	 subcc	%o1, 2, %o1			! only two bytes more?
+	b	4f				! only one byte remains
+	 or	%g0, %g0, %o4			! clear fake hword value
+2:	lduh	[%o0], %o4			! get hword
+	be	6f				! jmp if only hword remains
+	 add	%o0, 2, %o0			! advance buf ptr either way
+	sll	%o4, 16, %o4			! create upper hword
+4:	ldub	[%o0], %o5			! get final byte
+	sll	%o5, 8, %o5			! put into place
+	or	%o5, %o4, %o4			! coalesce with hword (if any)
+6:	addcc	%o4, %o2, %o2			! add to sum
+1:	retl					! get outta here
+	 addx	%g0, %o2, %o0			! add final carry into retval
+
+	/* Also do alignment out of band to get better cache patterns. */
+csum_partial_fix_alignment:
+	cmp	%o1, 6
+	bl	cpte - 0x4
+	 andcc	%o0, 0x2, %g0
+	be	1f
+	 andcc	%o0, 0x4, %g0
+	lduh	[%o0 + 0x00], %g2
+	sub	%o1, 2, %o1
+	add	%o0, 2, %o0
+	sll	%g2, 16, %g2
+	addcc	%g2, %o2, %o2
+	srl	%o2, 16, %g3
+	addx	%g0, %g3, %g2
+	sll	%o2, 16, %o2
+	sll	%g2, 16, %g3
+	srl	%o2, 16, %o2
+	andcc	%o0, 0x4, %g0
+	or	%g3, %o2, %o2
+1:	be	cpa
+	 andcc	%o1, 0xffffff80, %o3
+	ld	[%o0 + 0x00], %g2
+	sub	%o1, 4, %o1
+	addcc	%g2, %o2, %o2
+	add	%o0, 4, %o0
+	addx	%g0, %o2, %o2
+	b	cpa
+	 andcc	%o1, 0xffffff80, %o3
+
+	/* The common case is to get called with a nicely aligned
+	 * buffer of size 0x20.  Follow the code path for that case.
+	 */
+	.globl	csum_partial
+csum_partial:			/* %o0=buf, %o1=len, %o2=sum */
+	andcc	%o0, 0x7, %g0				! alignment problems?
+	bne	csum_partial_fix_alignment		! yep, handle it
+	 sethi	%hi(cpte - 8), %g7			! prepare table jmp ptr
+	andcc	%o1, 0xffffff80, %o3			! num loop iterations
+cpa:	be	3f					! none to do
+	 andcc	%o1, 0x70, %g1				! clears carry flag too
+5:	CSUM_BIGCHUNK(%o0, 0x00, %o2, %o4, %o5, %g2, %g3, %g4, %g5)
+	CSUM_BIGCHUNK(%o0, 0x20, %o2, %o4, %o5, %g2, %g3, %g4, %g5)
+	CSUM_BIGCHUNK(%o0, 0x40, %o2, %o4, %o5, %g2, %g3, %g4, %g5)
+	CSUM_BIGCHUNK(%o0, 0x60, %o2, %o4, %o5, %g2, %g3, %g4, %g5)
+	addx	%g0, %o2, %o2				! sink in final carry
+	subcc	%o3, 128, %o3				! detract from loop iters
+	bne	5b					! more to do
+	 add	%o0, 128, %o0				! advance buf ptr
+	andcc	%o1, 0x70, %g1				! clears carry flag too
+3:	be	cpte					! nope
+	 andcc	%o1, 0xf, %g0				! anything left at all?
+	srl	%g1, 1, %o4				! compute offset
+	sub	%g7, %g1, %g7				! adjust jmp ptr
+	sub	%g7, %o4, %g7				! final jmp ptr adjust
+	jmp	%g7 + %lo(cpte - 8)			! enter the table
+	 add	%o0, %g1, %o0				! advance buf ptr
+cptbl:	CSUM_LASTCHUNK(%o0, 0x68, %o2, %g2, %g3, %g4, %g5)
+	CSUM_LASTCHUNK(%o0, 0x58, %o2, %g2, %g3, %g4, %g5)
+	CSUM_LASTCHUNK(%o0, 0x48, %o2, %g2, %g3, %g4, %g5)
+	CSUM_LASTCHUNK(%o0, 0x38, %o2, %g2, %g3, %g4, %g5)
+	CSUM_LASTCHUNK(%o0, 0x28, %o2, %g2, %g3, %g4, %g5)
+	CSUM_LASTCHUNK(%o0, 0x18, %o2, %g2, %g3, %g4, %g5)
+	CSUM_LASTCHUNK(%o0, 0x08, %o2, %g2, %g3, %g4, %g5)
+	addx	%g0, %o2, %o2				! fetch final carry
+	andcc	%o1, 0xf, %g0				! anything left at all?
+cpte:	bne	csum_partial_end_cruft			! yep, handle it
+	 andcc	%o1, 8, %g0				! check how much
+cpout:	retl						! get outta here
+	 mov	%o2, %o0				! return computed csum
+
+	.globl __csum_partial_copy_start, __csum_partial_copy_end
+__csum_partial_copy_start:
+
+/* Work around cpp -rob */
+#define ALLOC #alloc
+#define EXECINSTR #execinstr
+#define EX(x,y,a,b)				\
+98:     x,y;                                    \
+        .section .fixup,ALLOC,EXECINSTR;	\
+        .align  4;                              \
+99:     ba 30f;                                 \
+         a, b, %o3;                             \
+        .section __ex_table,ALLOC;		\
+        .align  4;                              \
+        .word   98b, 99b;                       \
+        .text;                                  \
+        .align  4
+
+#define EX2(x,y)				\
+98:     x,y;                                    \
+        .section __ex_table,ALLOC;		\
+        .align  4;                              \
+        .word   98b, 30f;                       \
+        .text;                                  \
+        .align  4
+
+#define EX3(x,y)				\
+98:     x,y;                                    \
+        .section __ex_table,ALLOC;		\
+        .align  4;                              \
+        .word   98b, 96f;                       \
+        .text;                                  \
+        .align  4
+
+#define EXT(start,end,handler)			\
+        .section __ex_table,ALLOC;		\
+        .align  4;                              \
+        .word   start, 0, end, handler;         \
+        .text;                                  \
+        .align  4
+
+	/* This aligned version typically executes in 8.5 superscalar cycles; this
+	 * is the best I can do.  I say 8.5 because the final add will pair with
+	 * the next ldd in the main unrolled loop.  Thus the pipe is always full.
+	 * If you change these macros (including order of instructions),
+	 * please check the fixup code below as well.
+	 */
+#define CSUMCOPY_BIGCHUNK_ALIGNED(src, dst, sum, off, t0, t1, t2, t3, t4, t5, t6, t7)	\
+	ldd	[src + off + 0x00], t0;							\
+	ldd	[src + off + 0x08], t2;							\
+	addxcc	t0, sum, sum;								\
+	ldd	[src + off + 0x10], t4;							\
+	addxcc	t1, sum, sum;								\
+	ldd	[src + off + 0x18], t6;							\
+	addxcc	t2, sum, sum;								\
+	std	t0, [dst + off + 0x00];							\
+	addxcc	t3, sum, sum;								\
+	std	t2, [dst + off + 0x08];							\
+	addxcc	t4, sum, sum;								\
+	std	t4, [dst + off + 0x10];							\
+	addxcc	t5, sum, sum;								\
+	std	t6, [dst + off + 0x18];							\
+	addxcc	t6, sum, sum;								\
+	addxcc	t7, sum, sum;
+
+	/* 12 superscalar cycles seems to be the limit for this case,
+	 * because of this we thus do all the ldd's together to get
+	 * Viking MXCC into streaming mode.  Ho hum...
+	 */
+#define CSUMCOPY_BIGCHUNK(src, dst, sum, off, t0, t1, t2, t3, t4, t5, t6, t7)	\
+	ldd	[src + off + 0x00], t0;						\
+	ldd	[src + off + 0x08], t2;						\
+	ldd	[src + off + 0x10], t4;						\
+	ldd	[src + off + 0x18], t6;						\
+	st	t0, [dst + off + 0x00];						\
+	addxcc	t0, sum, sum;							\
+	st	t1, [dst + off + 0x04];						\
+	addxcc	t1, sum, sum;							\
+	st	t2, [dst + off + 0x08];						\
+	addxcc	t2, sum, sum;							\
+	st	t3, [dst + off + 0x0c];						\
+	addxcc	t3, sum, sum;							\
+	st	t4, [dst + off + 0x10];						\
+	addxcc	t4, sum, sum;							\
+	st	t5, [dst + off + 0x14];						\
+	addxcc	t5, sum, sum;							\
+	st	t6, [dst + off + 0x18];						\
+	addxcc	t6, sum, sum;							\
+	st	t7, [dst + off + 0x1c];						\
+	addxcc	t7, sum, sum;
+
+	/* Yuck, 6 superscalar cycles... */
+#define CSUMCOPY_LASTCHUNK(src, dst, sum, off, t0, t1, t2, t3)	\
+	ldd	[src - off - 0x08], t0;				\
+	ldd	[src - off - 0x00], t2;				\
+	addxcc	t0, sum, sum;					\
+	st	t0, [dst - off - 0x08];				\
+	addxcc	t1, sum, sum;					\
+	st	t1, [dst - off - 0x04];				\
+	addxcc	t2, sum, sum;					\
+	st	t2, [dst - off - 0x00];				\
+	addxcc	t3, sum, sum;					\
+	st	t3, [dst - off + 0x04];
+
+	/* Handle the end cruft code out of band for better cache patterns. */
+cc_end_cruft:
+	be	1f
+	 andcc	%o3, 4, %g0
+	EX(ldd	[%o0 + 0x00], %g2, and %o3, 0xf)
+	add	%o1, 8, %o1
+	addcc	%g2, %g7, %g7
+	add	%o0, 8, %o0
+	addxcc	%g3, %g7, %g7
+	EX2(st	%g2, [%o1 - 0x08])
+	addx	%g0, %g7, %g7
+	andcc	%o3, 4, %g0
+	EX2(st	%g3, [%o1 - 0x04])
+1:	be	1f
+	 andcc	%o3, 3, %o3
+	EX(ld	[%o0 + 0x00], %g2, add %o3, 4)
+	add	%o1, 4, %o1
+	addcc	%g2, %g7, %g7
+	EX2(st	%g2, [%o1 - 0x04])
+	addx	%g0, %g7, %g7
+	andcc	%o3, 3, %g0
+	add	%o0, 4, %o0
+1:	be	1f
+	 addcc	%o3, -1, %g0
+	bne	2f
+	 subcc	%o3, 2, %o3
+	b	4f
+	 or	%g0, %g0, %o4
+2:	EX(lduh	[%o0 + 0x00], %o4, add %o3, 2)
+	add	%o0, 2, %o0
+	EX2(sth	%o4, [%o1 + 0x00])
+	be	6f
+	 add	%o1, 2, %o1
+	sll	%o4, 16, %o4
+4:	EX(ldub	[%o0 + 0x00], %o5, add %g0, 1)
+	EX2(stb	%o5, [%o1 + 0x00])
+	sll	%o5, 8, %o5
+	or	%o5, %o4, %o4
+6:	addcc	%o4, %g7, %g7
+1:	retl
+	 addx	%g0, %g7, %o0
+
+	/* Also, handle the alignment code out of band. */
+cc_dword_align:
+	cmp	%g1, 6
+	bl,a	ccte
+	 andcc	%g1, 0xf, %o3
+	andcc	%o0, 0x1, %g0
+	bne	ccslow
+	 andcc	%o0, 0x2, %g0
+	be	1f
+	 andcc	%o0, 0x4, %g0
+	EX(lduh	[%o0 + 0x00], %g4, add %g1, 0)
+	sub	%g1, 2, %g1
+	EX2(sth	%g4, [%o1 + 0x00])
+	add	%o0, 2, %o0
+	sll	%g4, 16, %g4
+	addcc	%g4, %g7, %g7
+	add	%o1, 2, %o1
+	srl	%g7, 16, %g3
+	addx	%g0, %g3, %g4
+	sll	%g7, 16, %g7
+	sll	%g4, 16, %g3
+	srl	%g7, 16, %g7
+	andcc	%o0, 0x4, %g0
+	or	%g3, %g7, %g7
+1:	be	3f
+	 andcc	%g1, 0xffffff80, %g0
+	EX(ld	[%o0 + 0x00], %g4, add %g1, 0)
+	sub	%g1, 4, %g1
+	EX2(st	%g4, [%o1 + 0x00])
+	add	%o0, 4, %o0
+	addcc	%g4, %g7, %g7
+	add	%o1, 4, %o1
+	addx	%g0, %g7, %g7
+	b	3f
+	 andcc	%g1, 0xffffff80, %g0
+
+	/* Sun, you just can't beat me, you just can't.  Stop trying,
+	 * give up.  I'm serious, I am going to kick the living shit
+	 * out of you, game over, lights out.
+	 */
+	.align	8
+	.globl	__csum_partial_copy_sparc_generic
+__csum_partial_copy_sparc_generic:
+					/* %o0=src, %o1=dest, %g1=len, %g7=sum */
+	xor	%o0, %o1, %o4		! get changing bits
+	andcc	%o4, 3, %g0		! check for mismatched alignment
+	bne	ccslow			! better this than unaligned/fixups
+	 andcc	%o0, 7, %g0		! need to align things?
+	bne	cc_dword_align		! yes, we check for short lengths there
+	 andcc	%g1, 0xffffff80, %g0	! can we use unrolled loop?
+3:	be	3f			! nope, less than one loop remains
+	 andcc	%o1, 4, %g0		! dest aligned on 4 or 8 byte boundary?
+	be	ccdbl + 4		! 8 byte aligned, kick ass
+5:	CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x00,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
+	CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x20,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
+	CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x40,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
+	CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x60,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
+10:	EXT(5b, 10b, 20f)		! note for exception handling
+	sub	%g1, 128, %g1		! detract from length
+	addx	%g0, %g7, %g7		! add in last carry bit
+	andcc	%g1, 0xffffff80, %g0	! more to csum?
+	add	%o0, 128, %o0		! advance src ptr
+	bne	5b			! we did not go negative, continue looping
+	 add	%o1, 128, %o1		! advance dest ptr
+3:	andcc	%g1, 0x70, %o2		! can use table?
+ccmerge:be	ccte			! nope, go and check for end cruft
+	 andcc	%g1, 0xf, %o3		! get low bits of length (clears carry btw)
+	srl	%o2, 1, %o4		! begin negative offset computation
+	sethi	%hi(12f), %o5		! set up table ptr end
+	add	%o0, %o2, %o0		! advance src ptr
+	sub	%o5, %o4, %o5		! continue table calculation
+	sll	%o2, 1, %g2		! constant multiplies are fun...
+	sub	%o5, %g2, %o5		! some more adjustments
+	jmp	%o5 + %lo(12f)		! jump into it, duff style, wheee...
+	 add	%o1, %o2, %o1		! advance dest ptr (carry is clear btw)
+cctbl:	CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x68,%g2,%g3,%g4,%g5)
+	CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x58,%g2,%g3,%g4,%g5)
+	CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x48,%g2,%g3,%g4,%g5)
+	CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x38,%g2,%g3,%g4,%g5)
+	CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x28,%g2,%g3,%g4,%g5)
+	CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x18,%g2,%g3,%g4,%g5)
+	CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x08,%g2,%g3,%g4,%g5)
+12:	EXT(cctbl, 12b, 22f)		! note for exception table handling
+	addx	%g0, %g7, %g7
+	andcc	%o3, 0xf, %g0		! check for low bits set
+ccte:	bne	cc_end_cruft		! something left, handle it out of band
+	 andcc	%o3, 8, %g0		! begin checks for that code
+	retl				! return
+	 mov	%g7, %o0		! give em the computed checksum
+ccdbl:	CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x00,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
+	CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x20,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
+	CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x40,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
+	CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x60,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
+11:	EXT(ccdbl, 11b, 21f)		! note for exception table handling
+	sub	%g1, 128, %g1		! detract from length
+	addx	%g0, %g7, %g7		! add in last carry bit
+	andcc	%g1, 0xffffff80, %g0	! more to csum?
+	add	%o0, 128, %o0		! advance src ptr
+	bne	ccdbl			! we did not go negative, continue looping
+	 add	%o1, 128, %o1		! advance dest ptr
+	b	ccmerge			! finish it off, above
+	 andcc	%g1, 0x70, %o2		! can use table? (clears carry btw)
+
+ccslow:	cmp	%g1, 0
+	mov	0, %g5
+	bleu	4f
+	 andcc	%o0, 1, %o5		
+	be,a	1f
+	 srl	%g1, 1, %g4		
+	sub	%g1, 1, %g1	
+	EX(ldub	[%o0], %g5, add %g1, 1)
+	add	%o0, 1, %o0	
+	EX2(stb	%g5, [%o1])
+	srl	%g1, 1, %g4
+	add	%o1, 1, %o1
+1:	cmp	%g4, 0		
+	be,a	3f
+	 andcc	%g1, 1, %g0
+	andcc	%o0, 2, %g0	
+	be,a	1f
+	 srl	%g4, 1, %g4
+	EX(lduh	[%o0], %o4, add %g1, 0)
+	sub	%g1, 2, %g1	
+	srl	%o4, 8, %g2
+	sub	%g4, 1, %g4	
+	EX2(stb	%g2, [%o1])
+	add	%o4, %g5, %g5
+	EX2(stb	%o4, [%o1 + 1])
+	add	%o0, 2, %o0	
+	srl	%g4, 1, %g4
+	add	%o1, 2, %o1
+1:	cmp	%g4, 0		
+	be,a	2f
+	 andcc	%g1, 2, %g0
+	EX3(ld	[%o0], %o4)
+5:	srl	%o4, 24, %g2
+	srl	%o4, 16, %g3
+	EX2(stb	%g2, [%o1])
+	srl	%o4, 8, %g2
+	EX2(stb	%g3, [%o1 + 1])
+	add	%o0, 4, %o0
+	EX2(stb	%g2, [%o1 + 2])
+	addcc	%o4, %g5, %g5
+	EX2(stb	%o4, [%o1 + 3])
+	addx	%g5, %g0, %g5	! I am now too lazy to optimize this (question
+	add	%o1, 4, %o1	! whether it is worth it). Maybe some day - with
+	subcc	%g4, 1, %g4	! the sll/srl tricks
+	bne,a	5b
+	 EX3(ld	[%o0], %o4)
+	sll	%g5, 16, %g2
+	srl	%g5, 16, %g5
+	srl	%g2, 16, %g2
+	andcc	%g1, 2, %g0
+	add	%g2, %g5, %g5 
+2:	be,a	3f		
+	 andcc	%g1, 1, %g0
+	EX(lduh	[%o0], %o4, and %g1, 3)
+	andcc	%g1, 1, %g0
+	srl	%o4, 8, %g2
+	add	%o0, 2, %o0	
+	EX2(stb	%g2, [%o1])
+	add	%g5, %o4, %g5
+	EX2(stb	%o4, [%o1 + 1])
+	add	%o1, 2, %o1
+3:	be,a	1f		
+	 sll	%g5, 16, %o4
+	EX(ldub	[%o0], %g2, add %g0, 1)
+	sll	%g2, 8, %o4	
+	EX2(stb	%g2, [%o1])
+	add	%g5, %o4, %g5
+	sll	%g5, 16, %o4
+1:	addcc	%o4, %g5, %g5
+	srl	%g5, 16, %o4
+	addx	%g0, %o4, %g5
+	orcc	%o5, %g0, %g0
+	be	4f
+	 srl	%g5, 8, %o4
+	and	%g5, 0xff, %g2
+	and	%o4, 0xff, %o4
+	sll	%g2, 8, %g2
+	or	%g2, %o4, %g5
+4:	addcc	%g7, %g5, %g7
+	retl	
+	 addx	%g0, %g7, %o0
+__csum_partial_copy_end:
+
+/* We do these strange calculations for the csum_*_from_user case only, i.e.
+ * we only bother with faults on loads... */
+
+/* o2 = ((g2%20)&3)*8
+ * o3 = g1 - (g2/20)*32 - o2 */
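+/* (Worked example: with g2 = 23 one pass strips 32 from g1 and leaves g2 = 3,
+ *  so o2 = (3 & 3) * 8 = 24 and o3 = g1 - 32 - 24, matching the formula.) */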
+20:
+	cmp	%g2, 20
+	blu,a	1f
+	 and	%g2, 3, %o2
+	sub	%g1, 32, %g1
+	b	20b
+	 sub	%g2, 20, %g2
+1:
+	sll	%o2, 3, %o2
+	b	31f
+	 sub	%g1, %o2, %o3
+
+/* o2 = (!(g2 & 15) ? 0 : (((g2 & 15) + 1) & ~1)*8)
+ * o3 = g1 - (g2/16)*32 - o2 */
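+/* (Worked example: for g2 = 21, g2 & 15 = 5, so o2 = ((5 + 1) & ~1) * 8 = 48
+ *  and o3 = g1 - (21 / 16) * 32 - 48 = g1 - 80.) */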
+21:
+	andcc	%g2, 15, %o3
+	srl	%g2, 4, %g2
+	be,a	1f
+	 clr	%o2
+	add	%o3, 1, %o3
+	and	%o3, 14, %o3
+	sll	%o3, 3, %o2
+1:
+	sll	%g2, 5, %g2
+	sub	%g1, %g2, %o3
+	b	31f
+	 sub	%o3, %o2, %o3
+
+/* o0 += (g2/10)*16 - 0x70
+ * o1 += (g2/10)*16 - 0x70
+ * o2 = (g2 % 10) ? 8 : 0
+ * o3 += 0x70 - (g2/10)*16 - o2 */
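+/* (Worked example: for g2 = 13 one pass strips 16 from the 0x70 bias, so
+ *  o0 and o1 each move by 16 - 0x70, o2 = 8, and o3 gains 0x70 - 16 - 8 = 88.) */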
+22:
+	cmp	%g2, 10
+	blu,a	1f
+	 sub	%o0, 0x70, %o0
+	add	%o0, 16, %o0
+	add	%o1, 16, %o1
+	sub	%o3, 16, %o3
+	b	22b
+	 sub	%g2, 10, %g2
+1:
+	sub	%o1, 0x70, %o1
+	add	%o3, 0x70, %o3
+	clr	%o2
+	tst	%g2
+	bne,a	1f
+	 mov	8, %o2
+1:
+	b	31f
+	 sub	%o3, %o2, %o3
+96:
+	and	%g1, 3, %g1
+	sll	%g4, 2, %g4
+	add	%g1, %g4, %o3
+30:
+/* %o1 is dst
+ * %o3 is # bytes to zero out
+ * %o4 is faulting address
+ * %o5 is %pc where fault occurred */
+	clr	%o2
+31:
+/* %o0 is src
+ * %o1 is dst
+ * %o2 is # of bytes to copy from src to dst
+ * %o3 is # bytes to zero out
+ * %o4 is faulting address
+ * %o5 is %pc where fault occurred */
+	save	%sp, -104, %sp
+        mov     %i5, %o0
+        mov     %i7, %o1
+        mov	%i4, %o2
+        call    lookup_fault
+	 mov	%g7, %i4
+	cmp	%o0, 2
+	bne	1f	
+	 add	%g0, -EFAULT, %i5
+	tst	%i2
+	be	2f
+	 mov	%i0, %o1
+	mov	%i1, %o0
+5:
+	call	__memcpy
+	 mov	%i2, %o2
+	tst	%o0
+	bne,a	2f
+	 add	%i3, %i2, %i3
+	add	%i1, %i2, %i1
+2:
+	mov	%i1, %o0
+6:
+	call	__bzero
+	 mov	%i3, %o1
+1:
+	ld	[%sp + 168], %o2		! struct_ptr of parent
+	st	%i5, [%o2]
+	ret
+	 restore
+
+        .section __ex_table,#alloc
+        .align 4
+        .word 5b,2
+	.word 6b,2
diff --git a/arch/sparc/lib/copy_user.S b/arch/sparc/lib/copy_user.S
new file mode 100644
index 0000000..577505b
--- /dev/null
+++ b/arch/sparc/lib/copy_user.S
@@ -0,0 +1,492 @@
+/* copy_user.S: Sparc optimized copy_from_user and copy_to_user code.
+ *
+ *  Copyright(C) 1995 Linus Torvalds
+ *  Copyright(C) 1996 David S. Miller
+ *  Copyright(C) 1996 Eddie C. Dost
+ *  Copyright(C) 1996,1998 Jakub Jelinek
+ *
+ * derived from:
+ *	e-mail between David and Eddie.
+ *
+ * Returns 0 if successful, otherwise the count of bytes not yet copied
+ */
+
+#include <asm/ptrace.h>
+#include <asm/asmmacro.h>
+#include <asm/page.h>
+
+/* Work around cpp -rob */
+#define ALLOC #alloc
+#define EXECINSTR #execinstr
+#define EX(x,y,a,b) 				\
+98: 	x,y;					\
+	.section .fixup,ALLOC,EXECINSTR;	\
+	.align	4;				\
+99:	ba fixupretl;				\
+	 a, b, %g3;				\
+	.section __ex_table,ALLOC;		\
+	.align	4;				\
+	.word	98b, 99b;			\
+	.text;					\
+	.align	4
+
+#define EX2(x,y,c,d,e,a,b) 			\
+98: 	x,y;					\
+	.section .fixup,ALLOC,EXECINSTR;	\
+	.align	4;				\
+99:	c, d, e;				\
+	ba fixupretl;				\
+	 a, b, %g3;				\
+	.section __ex_table,ALLOC;		\
+	.align	4;				\
+	.word	98b, 99b;			\
+	.text;					\
+	.align	4
+
+#define EXO2(x,y) 				\
+98: 	x, y;					\
+	.section __ex_table,ALLOC;		\
+	.align	4;				\
+	.word	98b, 97f;			\
+	.text;					\
+	.align	4
+
+#define EXT(start,end,handler)			\
+	.section __ex_table,ALLOC;		\
+	.align	4;				\
+	.word	start, 0, end, handler;		\
+	.text;					\
+	.align	4
+
+/* Please do not change the following macros unless you also change the
+ * logic used in the .fixup section at the end of this file.
+ */
+
+/* Both these macros have to start with exactly the same insn */
+#define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
+	ldd	[%src + (offset) + 0x00], %t0; \
+	ldd	[%src + (offset) + 0x08], %t2; \
+	ldd	[%src + (offset) + 0x10], %t4; \
+	ldd	[%src + (offset) + 0x18], %t6; \
+	st	%t0, [%dst + (offset) + 0x00]; \
+	st	%t1, [%dst + (offset) + 0x04]; \
+	st	%t2, [%dst + (offset) + 0x08]; \
+	st	%t3, [%dst + (offset) + 0x0c]; \
+	st	%t4, [%dst + (offset) + 0x10]; \
+	st	%t5, [%dst + (offset) + 0x14]; \
+	st	%t6, [%dst + (offset) + 0x18]; \
+	st	%t7, [%dst + (offset) + 0x1c];
+
+#define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
+	ldd	[%src + (offset) + 0x00], %t0; \
+	ldd	[%src + (offset) + 0x08], %t2; \
+	ldd	[%src + (offset) + 0x10], %t4; \
+	ldd	[%src + (offset) + 0x18], %t6; \
+	std	%t0, [%dst + (offset) + 0x00]; \
+	std	%t2, [%dst + (offset) + 0x08]; \
+	std	%t4, [%dst + (offset) + 0x10]; \
+	std	%t6, [%dst + (offset) + 0x18];
+
+#define MOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
+	ldd	[%src - (offset) - 0x10], %t0; \
+	ldd	[%src - (offset) - 0x08], %t2; \
+	st	%t0, [%dst - (offset) - 0x10]; \
+	st	%t1, [%dst - (offset) - 0x0c]; \
+	st	%t2, [%dst - (offset) - 0x08]; \
+	st	%t3, [%dst - (offset) - 0x04];
+
+#define MOVE_HALFCHUNK(src, dst, offset, t0, t1, t2, t3) \
+	lduh	[%src + (offset) + 0x00], %t0; \
+	lduh	[%src + (offset) + 0x02], %t1; \
+	lduh	[%src + (offset) + 0x04], %t2; \
+	lduh	[%src + (offset) + 0x06], %t3; \
+	sth	%t0, [%dst + (offset) + 0x00]; \
+	sth	%t1, [%dst + (offset) + 0x02]; \
+	sth	%t2, [%dst + (offset) + 0x04]; \
+	sth	%t3, [%dst + (offset) + 0x06];
+
+#define MOVE_SHORTCHUNK(src, dst, offset, t0, t1) \
+	ldub	[%src - (offset) - 0x02], %t0; \
+	ldub	[%src - (offset) - 0x01], %t1; \
+	stb	%t0, [%dst - (offset) - 0x02]; \
+	stb	%t1, [%dst - (offset) - 0x01];
+
+	.text
+	.align	4
+
+	.globl  __copy_user_begin
+__copy_user_begin:
+
+	.globl	__copy_user
+dword_align:
+	andcc	%o1, 1, %g0
+	be	4f
+	 andcc	%o1, 2, %g0
+
+	EXO2(ldub [%o1], %g2)
+	add	%o1, 1, %o1
+	EXO2(stb %g2, [%o0])
+	sub	%o2, 1, %o2
+	bne	3f
+	 add	%o0, 1, %o0
+
+	EXO2(lduh [%o1], %g2)
+	add	%o1, 2, %o1
+	EXO2(sth %g2, [%o0])
+	sub	%o2, 2, %o2
+	b	3f
+	 add	%o0, 2, %o0
+4:
+	EXO2(lduh [%o1], %g2)
+	add	%o1, 2, %o1
+	EXO2(sth %g2, [%o0])
+	sub	%o2, 2, %o2
+	b	3f
+	 add	%o0, 2, %o0
+
+__copy_user:	/* %o0=dst %o1=src %o2=len */
+	xor	%o0, %o1, %o4
+1:
+	andcc	%o4, 3, %o5
+2:
+	bne	cannot_optimize
+	 cmp	%o2, 15
+
+	bleu	short_aligned_end
+	 andcc	%o1, 3, %g0
+
+	bne	dword_align
+3:
+	 andcc	%o1, 4, %g0
+
+	be	2f
+	 mov	%o2, %g1
+
+	EXO2(ld [%o1], %o4)
+	sub	%g1, 4, %g1
+	EXO2(st %o4, [%o0])
+	add	%o1, 4, %o1
+	add	%o0, 4, %o0
+2:
+	andcc	%g1, 0xffffff80, %g7
+	be	3f
+	 andcc	%o0, 4, %g0
+
+	be	ldd_std + 4
+5:
+	MOVE_BIGCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
+	MOVE_BIGCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
+	MOVE_BIGCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
+	MOVE_BIGCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
+80:
+	EXT(5b, 80b, 50f)
+	subcc	%g7, 128, %g7
+	add	%o1, 128, %o1
+	bne	5b
+	 add	%o0, 128, %o0
+3:
+	andcc	%g1, 0x70, %g7
+	be	copy_user_table_end
+	 andcc	%g1, 8, %g0
+
+	sethi	%hi(copy_user_table_end), %o5
+	srl	%g7, 1, %o4
+	add	%g7, %o4, %o4
+	add	%o1, %g7, %o1
+	sub	%o5, %o4, %o5
+	jmpl	%o5 + %lo(copy_user_table_end), %g0
+	 add	%o0, %g7, %o0
+
+copy_user_table:
+	MOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g4, g5)
+	MOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g4, g5)
+	MOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g4, g5)
+	MOVE_LASTCHUNK(o1, o0, 0x30, g2, g3, g4, g5)
+	MOVE_LASTCHUNK(o1, o0, 0x20, g2, g3, g4, g5)
+	MOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g4, g5)
+	MOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g4, g5)
+copy_user_table_end:
+	EXT(copy_user_table, copy_user_table_end, 51f)
+	be	copy_user_last7
+	 andcc	%g1, 4, %g0
+
+	EX(ldd	[%o1], %g2, and %g1, 0xf)
+	add	%o0, 8, %o0
+	add	%o1, 8, %o1
+	EX(st	%g2, [%o0 - 0x08], and %g1, 0xf)
+	EX2(st	%g3, [%o0 - 0x04], and %g1, 0xf, %g1, sub %g1, 4)
+copy_user_last7:
+	be	1f
+	 andcc	%g1, 2, %g0
+
+	EX(ld	[%o1], %g2, and %g1, 7)
+	add	%o1, 4, %o1
+	EX(st	%g2, [%o0], and %g1, 7)
+	add	%o0, 4, %o0
+1:
+	be	1f
+	 andcc	%g1, 1, %g0
+
+	EX(lduh	[%o1], %g2, and %g1, 3)
+	add	%o1, 2, %o1
+	EX(sth	%g2, [%o0], and %g1, 3)
+	add	%o0, 2, %o0
+1:
+	be	1f
+	 nop
+
+	EX(ldub	[%o1], %g2, add %g0, 1)
+	EX(stb	%g2, [%o0], add %g0, 1)
+1:
+	retl
+ 	 clr	%o0
+
+ldd_std:
+	MOVE_BIGALIGNCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
+	MOVE_BIGALIGNCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
+	MOVE_BIGALIGNCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
+	MOVE_BIGALIGNCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
+81:
+	EXT(ldd_std, 81b, 52f)
+	subcc	%g7, 128, %g7
+	add	%o1, 128, %o1
+	bne	ldd_std
+	 add	%o0, 128, %o0
+
+	andcc	%g1, 0x70, %g7
+	be	copy_user_table_end
+	 andcc	%g1, 8, %g0
+
+	sethi	%hi(copy_user_table_end), %o5
+	srl	%g7, 1, %o4
+	add	%g7, %o4, %o4
+	add	%o1, %g7, %o1
+	sub	%o5, %o4, %o5
+	jmpl	%o5 + %lo(copy_user_table_end), %g0
+	 add	%o0, %g7, %o0
+
+cannot_optimize:
+	bleu	short_end
+	 cmp	%o5, 2
+
+	bne	byte_chunk
+	 and	%o2, 0xfffffff0, %o3
+	 
+	andcc	%o1, 1, %g0
+	be	10f
+	 nop
+
+	EXO2(ldub [%o1], %g2)
+	add	%o1, 1, %o1
+	EXO2(stb %g2, [%o0])
+	sub	%o2, 1, %o2
+	andcc	%o2, 0xfffffff0, %o3
+	be	short_end
+	 add	%o0, 1, %o0
+10:
+	MOVE_HALFCHUNK(o1, o0, 0x00, g2, g3, g4, g5)
+	MOVE_HALFCHUNK(o1, o0, 0x08, g2, g3, g4, g5)
+82:
+	EXT(10b, 82b, 53f)
+	subcc	%o3, 0x10, %o3
+	add	%o1, 0x10, %o1
+	bne	10b
+	 add	%o0, 0x10, %o0
+	b	2f
+	 and	%o2, 0xe, %o3
+	
+byte_chunk:
+	MOVE_SHORTCHUNK(o1, o0, -0x02, g2, g3)
+	MOVE_SHORTCHUNK(o1, o0, -0x04, g2, g3)
+	MOVE_SHORTCHUNK(o1, o0, -0x06, g2, g3)
+	MOVE_SHORTCHUNK(o1, o0, -0x08, g2, g3)
+	MOVE_SHORTCHUNK(o1, o0, -0x0a, g2, g3)
+	MOVE_SHORTCHUNK(o1, o0, -0x0c, g2, g3)
+	MOVE_SHORTCHUNK(o1, o0, -0x0e, g2, g3)
+	MOVE_SHORTCHUNK(o1, o0, -0x10, g2, g3)
+83:
+	EXT(byte_chunk, 83b, 54f)
+	subcc	%o3, 0x10, %o3
+	add	%o1, 0x10, %o1
+	bne	byte_chunk
+	 add	%o0, 0x10, %o0
+
+short_end:
+	and	%o2, 0xe, %o3
+2:
+	sethi	%hi(short_table_end), %o5
+	sll	%o3, 3, %o4
+	add	%o0, %o3, %o0
+	sub	%o5, %o4, %o5
+	add	%o1, %o3, %o1
+	jmpl	%o5 + %lo(short_table_end), %g0
+	 andcc	%o2, 1, %g0
+84:
+	MOVE_SHORTCHUNK(o1, o0, 0x0c, g2, g3)
+	MOVE_SHORTCHUNK(o1, o0, 0x0a, g2, g3)
+	MOVE_SHORTCHUNK(o1, o0, 0x08, g2, g3)
+	MOVE_SHORTCHUNK(o1, o0, 0x06, g2, g3)
+	MOVE_SHORTCHUNK(o1, o0, 0x04, g2, g3)
+	MOVE_SHORTCHUNK(o1, o0, 0x02, g2, g3)
+	MOVE_SHORTCHUNK(o1, o0, 0x00, g2, g3)
+short_table_end:
+	EXT(84b, short_table_end, 55f)
+	be	1f
+	 nop
+	EX(ldub	[%o1], %g2, add %g0, 1)
+	EX(stb	%g2, [%o0], add %g0, 1)
+1:
+	retl
+ 	 clr	%o0
+
+short_aligned_end:
+	bne	short_end
+	 andcc	%o2, 8, %g0
+
+	be	1f
+	 andcc	%o2, 4, %g0
+
+	EXO2(ld	[%o1 + 0x00], %g2)
+	EXO2(ld	[%o1 + 0x04], %g3)
+	add	%o1, 8, %o1
+	EXO2(st	%g2, [%o0 + 0x00])
+	EX(st	%g3, [%o0 + 0x04], sub %o2, 4)
+	add	%o0, 8, %o0
+1:
+	b	copy_user_last7
+	 mov	%o2, %g1
+
+	.section .fixup,#alloc,#execinstr
+	.align	4
+97:
+	mov	%o2, %g3
+fixupretl:
+	sethi   %hi(PAGE_OFFSET), %g1
+	cmp	%o0, %g1
+	blu	1f
+	 cmp	%o1, %g1
+	bgeu	1f
+	 nop
+	save	%sp, -64, %sp
+	mov	%i0, %o0
+	call	__bzero
+	 mov	%g3, %o1
+	restore
+1:	retl
+	 mov	%g3, %o0
+
+/* exception routine sets %g2 to (broken_insn - first_insn)>>2 */
+50:
+/* This magic counts how many bytes are left when a crash happens inside
+ * MOVE_BIGCHUNK. It is derived from how much ldd reads, st stores, etc.
+ * x = g2 % 12;
+ * g3 = g1 + g7 - ((g2 / 12) * 32 + ((x < 4) ? 0 : (x - 4) * 4));
+ * o0 += (g2 / 12) * 32;
+ */
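+/* (Worked example: a fault at insn index 14 lies in the loads of the second
+ *  MOVE_BIGCHUNK, so x = 2 < 4; only the first chunk's 32 bytes count as
+ *  stored, o0 advances by 32 and 32 comes off the not-copied count.) */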
+	cmp	%g2, 12
+	add	%o0, %g7, %o0
+	bcs	1f
+	 cmp	%g2, 24
+	bcs	2f
+	 cmp	%g2, 36
+	bcs	3f
+	 nop
+	sub	%g2, 12, %g2
+	sub	%g7, 32, %g7
+3:	sub	%g2, 12, %g2
+	sub	%g7, 32, %g7
+2:	sub	%g2, 12, %g2
+	sub	%g7, 32, %g7
+1:	cmp	%g2, 4
+	bcs,a	60f
+	 clr	%g2
+	sub	%g2, 4, %g2
+	sll	%g2, 2, %g2
+60:	and	%g1, 0x7f, %g3
+	sub	%o0, %g7, %o0
+	add	%g3, %g7, %g3
+	ba	fixupretl
+	 sub	%g3, %g2, %g3
+51:
+/* i = 41 - g2; j = i % 6;
+ * g3 = (g1 & 15) + (i / 6) * 16 + ((j < 4) ? (j + 1) * 4 : 16);
+ * o0 -= (i / 6) * 16 + 16;
+ */
+	neg	%g2
+	and	%g1, 0xf, %g1
+	add	%g2, 41, %g2
+	add	%o0, %g1, %o0
+1:	cmp	%g2, 6
+	bcs,a	2f
+	 cmp	%g2, 4
+	add	%g1, 16, %g1
+	b	1b
+	 sub	%g2, 6, %g2
+2:	bcc,a	2f
+	 mov	16, %g2
+	inc	%g2
+	sll	%g2, 2, %g2
+2:	add	%g1, %g2, %g3
+	ba	fixupretl
+	 sub	%o0, %g3, %o0
+52:
+/* g3 = g1 + g7 - ((g2 / 8) * 32 + ((g2 & 4) ? (g2 & 3) * 8 : 0));
+   o0 += (g2 / 8) * 32 */
+	andn	%g2, 7, %g4
+	add	%o0, %g7, %o0
+	andcc	%g2, 4, %g0
+	and	%g2, 3, %g2
+	sll	%g4, 2, %g4
+	sll	%g2, 3, %g2
+	bne	60b
+	 sub	%g7, %g4, %g7
+	ba	60b
+	 clr	%g2
+53:
+/* g3 = o3 + (o2 & 15) - (g2 & 8) - ((g2 & 4) ? (g2 & 3) * 2 : 0);
+   o0 += (g2 & 8) */
+	and	%g2, 3, %g4
+	andcc	%g2, 4, %g0
+	and	%g2, 8, %g2
+	sll	%g4, 1, %g4
+	be	1f
+	 add	%o0, %g2, %o0
+	add	%g2, %g4, %g2
+1:	and	%o2, 0xf, %g3
+	add	%g3, %o3, %g3
+	ba	fixupretl
+	 sub	%g3, %g2, %g3
+54:
+/* g3 = o3 + (o2 & 15) - (g2 / 4) * 2 - ((g2 & 2) ? (g2 & 1) : 0);
+   o0 += (g2 / 4) * 2 */
+	srl	%g2, 2, %o4
+	and	%g2, 1, %o5
+	srl	%g2, 1, %g2
+	add	%o4, %o4, %o4
+	and	%o5, %g2, %o5
+	and	%o2, 0xf, %o2
+	add	%o0, %o4, %o0
+	sub	%o3, %o5, %o3
+	sub	%o2, %o4, %o2
+	ba	fixupretl
+	 add	%o2, %o3, %g3
+55:
+/* i = 27 - g2;
+   g3 = (o2 & 1) + i / 4 * 2 + !(i & 3);
+   o0 -= i / 4 * 2 + 1 */
+	neg	%g2
+	and	%o2, 1, %o2
+	add	%g2, 27, %g2
+	srl	%g2, 2, %o5
+	andcc	%g2, 3, %g0
+	mov	1, %g2
+	add	%o5, %o5, %o5
+	be,a	1f
+	 clr	%g2
+1:	add	%g2, %o5, %g3
+	sub	%o0, %g3, %o0
+	ba	fixupretl
+	 add	%g3, %o2, %g3
+
+	.globl  __copy_user_end
+__copy_user_end:
diff --git a/arch/sparc/lib/debuglocks.c b/arch/sparc/lib/debuglocks.c
new file mode 100644
index 0000000..fb18235
--- /dev/null
+++ b/arch/sparc/lib/debuglocks.c
@@ -0,0 +1,202 @@
+/* $Id: debuglocks.c,v 1.11 2001/09/20 00:35:31 davem Exp $
+ * debuglocks.c: Debugging versions of SMP locking primitives.
+ *
+ * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1998-99 Anton Blanchard (anton@progsoc.uts.edu.au)
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/threads.h>	/* For NR_CPUS */
+#include <linux/spinlock.h>
+#include <asm/psr.h>
+#include <asm/system.h>
+
+#ifdef CONFIG_SMP
+
+/* Some notes on how these debugging routines work.  When a lock is acquired
+ * an extra debugging member lock->owner_pc is set to the caller of the lock
+ * acquisition routine.  Right before releasing a lock, the debugging program
+ * counter is cleared to zero.
+ *
+ * Furthermore, since PCs are 4-byte aligned on Sparc, we stuff the CPU
+ * number of the owner in the lowest two bits.
+ */
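+/* For example (illustrative values), a caller PC of 0x00123458 taken on
+ * CPU 2 is stored as owner_pc == 0x0012345a; show() below recovers the PC
+ * with "& ~3" and the CPU with "& 3".
+ */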
+
+#define STORE_CALLER(A) __asm__ __volatile__("mov %%i7, %0" : "=r" (A));
+
+static inline void show(char *str, spinlock_t *lock, unsigned long caller)
+{
+	int cpu = smp_processor_id();
+
+	printk("%s(%p) CPU#%d stuck at %08lx, owner PC(%08lx):CPU(%lx)\n",str,
+		lock, cpu, caller, lock->owner_pc & ~3, lock->owner_pc & 3);
+}
+
+static inline void show_read(char *str, rwlock_t *lock, unsigned long caller)
+{
+	int cpu = smp_processor_id();
+
+	printk("%s(%p) CPU#%d stuck at %08lx, owner PC(%08lx):CPU(%lx)\n", str,
+		lock, cpu, caller, lock->owner_pc & ~3, lock->owner_pc & 3);
+}
+
+static inline void show_write(char *str, rwlock_t *lock, unsigned long caller)
+{
+	int cpu = smp_processor_id();
+	int i;
+
+	printk("%s(%p) CPU#%d stuck at %08lx, owner PC(%08lx):CPU(%lx)", str,
+		lock, cpu, caller, lock->owner_pc & ~3, lock->owner_pc & 3);
+
+	for(i = 0; i < NR_CPUS; i++)
+		printk(" reader[%d]=%08lx", i, lock->reader_pc[i]);
+
+	printk("\n");
+}
+
+#undef INIT_STUCK
+#define INIT_STUCK 100000000
+
+void _do_spin_lock(spinlock_t *lock, char *str)
+{
+	unsigned long caller;
+	unsigned long val;
+	int cpu = smp_processor_id();
+	int stuck = INIT_STUCK;
+
+	STORE_CALLER(caller);
+
+again:
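+	/* ldstub atomically loads the lock byte and stores 0xff into it;
+	 * a non-zero result means somebody else already held the lock.
+	 */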
+	__asm__ __volatile__("ldstub [%1], %0" : "=r" (val) : "r" (&(lock->lock)));
+	if(val) {
+		while(lock->lock) {
+			if (!--stuck) {
+				show(str, lock, caller);
+				stuck = INIT_STUCK;
+			}
+			barrier();
+		}
+		goto again;
+	}
+	lock->owner_pc = (cpu & 3) | (caller & ~3);
+}
+
+int _spin_trylock(spinlock_t *lock)
+{
+	unsigned long val;
+	unsigned long caller;
+	int cpu = smp_processor_id();
+
+	STORE_CALLER(caller);
+
+	__asm__ __volatile__("ldstub [%1], %0" : "=r" (val) : "r" (&(lock->lock)));
+	if(!val) {
+		/* We got it, record our identity for debugging. */
+		lock->owner_pc = (cpu & 3) | (caller & ~3);
+	}
+	return val == 0;
+}
+
+void _do_spin_unlock(spinlock_t *lock)
+{
+	lock->owner_pc = 0;
+	barrier();
+	lock->lock = 0;
+}
+
+void _do_read_lock(rwlock_t *rw, char *str)
+{
+	unsigned long caller;
+	unsigned long val;
+	int cpu = smp_processor_id();
+	int stuck = INIT_STUCK;
+
+	STORE_CALLER(caller);
+
+wlock_again:
+	__asm__ __volatile__("ldstub [%1 + 3], %0" : "=r" (val) : "r" (&(rw->lock)));
+	if(val) {
+		while(rw->lock & 0xff) {
+			if (!--stuck) {
+				show_read(str, rw, caller);
+				stuck = INIT_STUCK;
+			}
+			barrier();
+		}
+		goto wlock_again;
+	}
+
+	rw->reader_pc[cpu] = caller;
+	barrier();
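+	/* The low byte of rw->lock still holds the 0xff written by ldstub,
+	 * so this increment carries into the reader count in the upper bytes
+	 * and clears that byte again in the same store.
+	 */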
+	rw->lock++;
+}
+
+void _do_read_unlock(rwlock_t *rw, char *str)
+{
+	unsigned long caller;
+	unsigned long val;
+	int cpu = smp_processor_id();
+	int stuck = INIT_STUCK;
+
+	STORE_CALLER(caller);
+
+wlock_again:
+	__asm__ __volatile__("ldstub [%1 + 3], %0" : "=r" (val) : "r" (&(rw->lock)));
+	if(val) {
+		while(rw->lock & 0xff) {
+			if (!--stuck) {
+				show_read(str, rw, caller);
+				stuck = INIT_STUCK;
+			}
+			barrier();
+		}
+		goto wlock_again;
+	}
+
+	rw->reader_pc[cpu] = 0;
+	barrier();
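+	/* The low byte was just set to 0xff by ldstub; subtracting 0x1ff
+	 * clears it and drops the reader count in the upper bytes by one
+	 * in a single store.
+	 */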
+	rw->lock -= 0x1ff;
+}
+
+void _do_write_lock(rwlock_t *rw, char *str)
+{
+	unsigned long caller;
+	unsigned long val;
+	int cpu = smp_processor_id();
+	int stuck = INIT_STUCK;
+
+	STORE_CALLER(caller);
+
+wlock_again:
+	__asm__ __volatile__("ldstub [%1 + 3], %0" : "=r" (val) : "r" (&(rw->lock)));
+	if(val) {
+wlock_wait:
+		while(rw->lock) {
+			if (!--stuck) {
+				show_write(str, rw, caller);
+				stuck = INIT_STUCK;
+			}
+			barrier();
+		}
+		goto wlock_again;
+	}
+
+	if (rw->lock & ~0xff) {
+		*(((unsigned char *)&rw->lock)+3) = 0;
+		barrier();
+		goto wlock_wait;
+	}
+
+	barrier();
+	rw->owner_pc = (cpu & 3) | (caller & ~3);
+}
+
+void _do_write_unlock(rwlock_t *rw)
+{
+	rw->owner_pc = 0;
+	barrier();
+	rw->lock = 0;
+}
+
+#endif /* CONFIG_SMP */
diff --git a/arch/sparc/lib/divdi3.S b/arch/sparc/lib/divdi3.S
new file mode 100644
index 0000000..681b368
--- /dev/null
+++ b/arch/sparc/lib/divdi3.S
@@ -0,0 +1,295 @@
+/* Copyright (C) 1989, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING.  If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA.  */
+
+	.data
+	.align 8
+	.globl	__clz_tab
+__clz_tab:
+	.byte	0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5
+	.byte	6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6
+	.byte	7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7
+	.byte	7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7
+	.byte	8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8
+	.byte	8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8
+	.byte	8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8
+	.byte	8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8
+	.size	 __clz_tab,256
+	.global .udiv
+
+	.text
+	.align 4
+	.globl __divdi3
+__divdi3:
+	save %sp,-104,%sp
+	cmp %i0,0
+	bge .LL40
+	mov 0,%l4
+	mov -1,%l4
+	sub %g0,%i1,%o0
+	mov %o0,%o5
+	subcc %g0,%o0,%g0
+	sub %g0,%i0,%o0
+	subx %o0,0,%o4
+	mov %o4,%i0
+	mov %o5,%i1
+.LL40:
+	cmp %i2,0
+	bge .LL84
+	mov %i3,%o4
+	xnor %g0,%l4,%l4
+	sub %g0,%i3,%o0
+	mov %o0,%o3
+	subcc %g0,%o0,%g0
+	sub %g0,%i2,%o0
+	subx %o0,0,%o2
+	mov %o2,%i2
+	mov %o3,%i3
+	mov %i3,%o4
+.LL84:
+	cmp %i2,0
+	bne .LL45
+	mov %i1,%i3
+	cmp %o4,%i0
+	bleu .LL46
+	mov %i3,%o1
+	mov	32,%g1
+	subcc	%i0,%o4,%g0
+1:	bcs	5f
+	 addxcc %o1,%o1,%o1	! shift n1n0 and a q-bit in lsb
+	sub	%i0,%o4,%i0	! this kills msb of n
+	addx	%i0,%i0,%i0	! so this cannot give carry
+	subcc	%g1,1,%g1
+2:	bne	1b
+	 subcc	%i0,%o4,%g0
+	bcs	3f
+	 addxcc %o1,%o1,%o1	! shift n1n0 and a q-bit in lsb
+	b	3f
+	 sub	%i0,%o4,%i0	! this kills msb of n
+4:	sub	%i0,%o4,%i0
+5:	addxcc	%i0,%i0,%i0
+	bcc	2b
+	 subcc	%g1,1,%g1
+! Got carry from n.  Subtract next step to cancel this carry.
+	bne	4b
+	 addcc	%o1,%o1,%o1	! shift n1n0 and a 0-bit in lsb
+	sub	%i0,%o4,%i0
+3:	xnor	%o1,0,%o1
+	b .LL50
+	mov 0,%o2
+.LL46:
+	cmp %o4,0
+	bne .LL85
+	mov %i0,%o2
+	mov 1,%o0
+	call .udiv,0
+	mov 0,%o1
+	mov %o0,%o4
+	mov %i0,%o2
+.LL85:
+	mov 0,%g3
+	mov	32,%g1
+	subcc	%g3,%o4,%g0
+1:	bcs	5f
+	 addxcc %o2,%o2,%o2	! shift n1n0 and a q-bit in lsb
+	sub	%g3,%o4,%g3	! this kills msb of n
+	addx	%g3,%g3,%g3	! so this cannot give carry
+	subcc	%g1,1,%g1
+2:	bne	1b
+	 subcc	%g3,%o4,%g0
+	bcs	3f
+	 addxcc %o2,%o2,%o2	! shift n1n0 and a q-bit in lsb
+	b	3f
+	 sub	%g3,%o4,%g3	! this kills msb of n
+4:	sub	%g3,%o4,%g3
+5:	addxcc	%g3,%g3,%g3
+	bcc	2b
+	 subcc	%g1,1,%g1
+! Got carry from n.  Subtract next step to cancel this carry.
+	bne	4b
+	 addcc	%o2,%o2,%o2	! shift n1n0 and a 0-bit in lsb
+	sub	%g3,%o4,%g3
+3:	xnor	%o2,0,%o2
+	mov %g3,%i0
+	mov %i3,%o1
+	mov	32,%g1
+	subcc	%i0,%o4,%g0
+1:	bcs	5f
+	 addxcc %o1,%o1,%o1	! shift n1n0 and a q-bit in lsb
+	sub	%i0,%o4,%i0	! this kills msb of n
+	addx	%i0,%i0,%i0	! so this cannot give carry
+	subcc	%g1,1,%g1
+2:	bne	1b
+	 subcc	%i0,%o4,%g0
+	bcs	3f
+	 addxcc %o1,%o1,%o1	! shift n1n0 and a q-bit in lsb
+	b	3f
+	 sub	%i0,%o4,%i0	! this kills msb of n
+4:	sub	%i0,%o4,%i0
+5:	addxcc	%i0,%i0,%i0
+	bcc	2b
+	 subcc	%g1,1,%g1
+! Got carry from n.  Subtract next step to cancel this carry.
+	bne	4b
+	 addcc	%o1,%o1,%o1	! shift n1n0 and a 0-bit in lsb
+	sub	%i0,%o4,%i0
+3:	xnor	%o1,0,%o1
+	b .LL86
+	mov %o1,%l1
+.LL45:
+	cmp %i2,%i0
+	bleu .LL51
+	sethi %hi(65535),%o0
+	b .LL78
+	mov 0,%o1
+.LL51:
+	or %o0,%lo(65535),%o0
+	cmp %i2,%o0
+	bgu .LL58
+	mov %i2,%o1
+	cmp %i2,256
+	addx %g0,-1,%o0
+	b .LL64
+	and %o0,8,%o2
+.LL58:
+	sethi %hi(16777215),%o0
+	or %o0,%lo(16777215),%o0
+	cmp %i2,%o0
+	bgu .LL64
+	mov 24,%o2
+	mov 16,%o2
+.LL64:
+	srl %o1,%o2,%o0
+	sethi %hi(__clz_tab),%o1
+	or %o1,%lo(__clz_tab),%o1
+	ldub [%o0+%o1],%o0
+	add %o0,%o2,%o0
+	mov 32,%o1
+	subcc %o1,%o0,%o3
+	bne,a .LL72
+	sub %o1,%o3,%o1
+	cmp %i0,%i2
+	bgu .LL74
+	cmp %i3,%o4
+	blu .LL78
+	mov 0,%o1
+.LL74:
+	b .LL78
+	mov 1,%o1
+.LL72:
+	sll %i2,%o3,%o2
+	srl %o4,%o1,%o0
+	or %o2,%o0,%i2
+	sll %o4,%o3,%o4
+	srl %i0,%o1,%o2
+	sll %i0,%o3,%o0
+	srl %i3,%o1,%o1
+	or %o0,%o1,%i0
+	sll %i3,%o3,%i3
+	mov %i0,%o1
+	mov	32,%g1
+	subcc	%o2,%i2,%g0
+1:	bcs	5f
+	 addxcc %o1,%o1,%o1	! shift n1n0 and a q-bit in lsb
+	sub	%o2,%i2,%o2	! this kills msb of n
+	addx	%o2,%o2,%o2	! so this cannot give carry
+	subcc	%g1,1,%g1
+2:	bne	1b
+	 subcc	%o2,%i2,%g0
+	bcs	3f
+	 addxcc %o1,%o1,%o1	! shift n1n0 and a q-bit in lsb
+	b	3f
+	 sub	%o2,%i2,%o2	! this kills msb of n
+4:	sub	%o2,%i2,%o2
+5:	addxcc	%o2,%o2,%o2
+	bcc	2b
+	 subcc	%g1,1,%g1
+! Got carry from n.  Subtract next step to cancel this carry.
+	bne	4b
+	 addcc	%o1,%o1,%o1	! shift n1n0 and a 0-bit in lsb
+	sub	%o2,%i2,%o2
+3:	xnor	%o1,0,%o1
+	mov %o2,%i0
+	wr	%g0,%o1,%y	! SPARC has 0-3 delay insn after a wr
+	sra	%o4,31,%g2	! Do not move this insn
+	and	%o1,%g2,%g2	! Do not move this insn
+	andcc	%g0,0,%g1	! Do not move this insn
+	mulscc	%g1,%o4,%g1
+	mulscc	%g1,%o4,%g1
+	mulscc	%g1,%o4,%g1
+	mulscc	%g1,%o4,%g1
+	mulscc	%g1,%o4,%g1
+	mulscc	%g1,%o4,%g1
+	mulscc	%g1,%o4,%g1
+	mulscc	%g1,%o4,%g1
+	mulscc	%g1,%o4,%g1
+	mulscc	%g1,%o4,%g1
+	mulscc	%g1,%o4,%g1
+	mulscc	%g1,%o4,%g1
+	mulscc	%g1,%o4,%g1
+	mulscc	%g1,%o4,%g1
+	mulscc	%g1,%o4,%g1
+	mulscc	%g1,%o4,%g1
+	mulscc	%g1,%o4,%g1
+	mulscc	%g1,%o4,%g1
+	mulscc	%g1,%o4,%g1
+	mulscc	%g1,%o4,%g1
+	mulscc	%g1,%o4,%g1
+	mulscc	%g1,%o4,%g1
+	mulscc	%g1,%o4,%g1
+	mulscc	%g1,%o4,%g1
+	mulscc	%g1,%o4,%g1
+	mulscc	%g1,%o4,%g1
+	mulscc	%g1,%o4,%g1
+	mulscc	%g1,%o4,%g1
+	mulscc	%g1,%o4,%g1
+	mulscc	%g1,%o4,%g1
+	mulscc	%g1,%o4,%g1
+	mulscc	%g1,%o4,%g1
+	mulscc	%g1,0,%g1
+	add	%g1,%g2,%o0
+	rd	%y,%o2
+	cmp %o0,%i0
+	bgu,a .LL78
+	add %o1,-1,%o1
+	bne,a .LL50
+	mov 0,%o2
+	cmp %o2,%i3
+	bleu .LL50
+	mov 0,%o2
+	add %o1,-1,%o1
+.LL78:
+	mov 0,%o2
+.LL50:
+	mov %o1,%l1
+.LL86:
+	mov %o2,%l0
+	mov %l0,%i0
+	mov %l1,%i1
+	cmp %l4,0
+	be .LL81
+	sub %g0,%i1,%o0
+	mov %o0,%l3
+	subcc %g0,%o0,%g0
+	sub %g0,%i0,%o0
+	subx %o0,0,%l2
+	mov %l2,%i0
+	mov %l3,%i1
+.LL81:
+	ret
+	restore
diff --git a/arch/sparc/lib/locks.S b/arch/sparc/lib/locks.S
new file mode 100644
index 0000000..95fa484
--- /dev/null
+++ b/arch/sparc/lib/locks.S
@@ -0,0 +1,72 @@
+/* $Id: locks.S,v 1.16 2000/02/26 11:02:47 anton Exp $
+ * locks.S: SMP low-level lock primitives on Sparc.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1998 Anton Blanchard (anton@progsoc.uts.edu.au)
+ * Copyright (C) 1998 Jakub Jelinek   (jj@ultra.linux.cz)
+ */
+
+#include <asm/ptrace.h>
+#include <asm/psr.h>
+#include <asm/smp.h>
+#include <asm/spinlock.h>
+
+	.text
+	.align	4
+
+	/* Reader/writer locks; as usual this is overly clever to make it
+	 * as fast as possible.
+	 */
+
+	/* caches... */
+___rw_read_enter_spin_on_wlock:
+	orcc	%g2, 0x0, %g0
+	be,a	___rw_read_enter
+	 ldstub	[%g1 + 3], %g2
+	b	___rw_read_enter_spin_on_wlock
+	 ldub	[%g1 + 3], %g2
+___rw_read_exit_spin_on_wlock:
+	orcc	%g2, 0x0, %g0
+	be,a	___rw_read_exit
+	 ldstub	[%g1 + 3], %g2
+	b	___rw_read_exit_spin_on_wlock
+	 ldub	[%g1 + 3], %g2
+___rw_write_enter_spin_on_wlock:
+	orcc	%g2, 0x0, %g0
+	be,a	___rw_write_enter
+	 ldstub	[%g1 + 3], %g2
+	b	___rw_write_enter_spin_on_wlock
+	 ld	[%g1], %g2
+
+	.globl	___rw_read_enter
+___rw_read_enter:
+	orcc	%g2, 0x0, %g0
+	bne,a	___rw_read_enter_spin_on_wlock
+	 ldub	[%g1 + 3], %g2
+	ld	[%g1], %g2
+	add	%g2, 1, %g2
+	st	%g2, [%g1]
+	retl
+	 mov	%g4, %o7
+
+	.globl	___rw_read_exit
+___rw_read_exit:
+	orcc	%g2, 0x0, %g0
+	bne,a	___rw_read_exit_spin_on_wlock
+	 ldub	[%g1 + 3], %g2
+	ld	[%g1], %g2
+	sub	%g2, 0x1ff, %g2
+	st	%g2, [%g1]
+	retl
+	 mov	%g4, %o7
+
+	.globl	___rw_write_enter
+___rw_write_enter:
+	orcc	%g2, 0x0, %g0
+	bne	___rw_write_enter_spin_on_wlock
+	 ld	[%g1], %g2
+	andncc	%g2, 0xff, %g0
+	bne,a	___rw_write_enter_spin_on_wlock
+	 stb	%g0, [%g1 + 3]
+	retl
+	 mov	%g4, %o7
diff --git a/arch/sparc/lib/lshrdi3.S b/arch/sparc/lib/lshrdi3.S
new file mode 100644
index 0000000..35abf5b
--- /dev/null
+++ b/arch/sparc/lib/lshrdi3.S
@@ -0,0 +1,27 @@
+/* $Id: lshrdi3.S,v 1.1 1999/03/21 06:37:45 davem Exp $ */
+
+	.globl	__lshrdi3
+__lshrdi3:
+	cmp	%o2, 0
+	be	3f
+	 mov	0x20, %g2
+
+	sub	%g2, %o2, %g2
+	cmp	%g2, 0
+	bg	1f
+	 srl	%o0, %o2, %o4
+
+	clr	%o4
+	neg	%g2
+	b	2f
+	 srl	%o0, %g2, %o5
+1:
+	sll  %o0, %g2, %g3
+	srl  %o1, %o2, %g2
+	or  %g2, %g3, %o5
+2:
+	mov  %o4, %o0
+	mov  %o5, %o1
+3:
+	retl 
+	 nop 
diff --git a/arch/sparc/lib/memcmp.S b/arch/sparc/lib/memcmp.S
new file mode 100644
index 0000000..cb4bdb0
--- /dev/null
+++ b/arch/sparc/lib/memcmp.S
@@ -0,0 +1,312 @@
+	.text
+	.align 4
+	.global __memcmp, memcmp
+__memcmp:
+memcmp:
+#if 1
+	cmp	%o2, 0
+	ble	L3
+	 mov	0, %g3
+L5:
+	ldub	[%o0], %g2
+	ldub	[%o1], %g3
+	sub	%g2, %g3, %g2
+	mov	%g2, %g3
+	sll	%g2, 24, %g2
+
+	cmp	%g2, 0
+	bne	L3
+	 add	%o0, 1, %o0
+
+	add	%o2, -1, %o2
+
+	cmp	%o2, 0
+	bg	L5
+	 add	%o1, 1, %o1
+L3:
+	sll	%g3, 24, %o0
+	sra	%o0, 24, %o0
+
+	retl
+	 nop
+#else
+	save	%sp, -104, %sp
+	mov	%i2, %o4
+	mov	%i0, %o0
+
+	cmp	%o4, 15
+	ble	L72
+	 mov	%i1, %i2
+
+	andcc	%i2, 3, %g0
+	be	L161
+	 andcc	%o0, 3, %g2
+L75:
+	ldub	[%o0], %g3
+	ldub	[%i2], %g2
+	add	%o0,1, %o0
+
+	subcc	%g3, %g2, %i0
+	bne	L156
+	 add	%i2, 1, %i2
+
+	andcc	%i2, 3, %g0
+	bne	L75
+	 add	%o4, -1, %o4
+
+	andcc	%o0, 3, %g2
+L161:
+	bne,a	L78
+	 mov	%i2, %i1
+
+	mov	%o0, %i5
+	mov	%i2, %i3
+	srl	%o4, 2, %i4
+
+	cmp	%i4, 0
+	bge	L93
+	 mov	%i4, %g2
+
+	add %i4, 3, %g2
+L93:
+	sra	%g2, 2, %g2
+	sll	%g2, 2, %g2
+	sub	%i4, %g2, %g2
+
+	cmp	%g2, 1
+	be,a	L88
+	 add	%o0, 4, %i5
+
+	bg	L94
+	 cmp	%g2, 2
+
+	cmp	%g2, 0
+	be,a	L86
+	 ld	[%o0], %g3
+
+	b	L162
+	 ld	[%i5], %g3
+L94:
+	be	L81
+	 cmp	%g2, 3
+
+	be,a	L83
+	 add	%o0, -4, %i5
+
+	b	L162
+	 ld	[%i5], %g3
+L81:
+	add	%o0, -8, %i5
+	ld	[%o0], %g3
+	add	%i2, -8, %i3
+	ld	[%i2], %g2
+
+	b	L82
+	 add	%i4, 2, %i4
+L83:
+	ld	[%o0], %g4
+	add	%i2, -4, %i3
+	ld	[%i2], %g1
+
+	b	L84
+	 add	%i4, 1, %i4
+L86:
+	b	L87
+	 ld	[%i2], %g2
+L88:
+	add	%i2, 4, %i3
+	ld	[%o0], %g4
+	add	%i4, -1, %i4
+	ld	[%i2], %g1
+L95:
+	ld	[%i5], %g3
+L162:
+	cmp	%g4, %g1
+	be	L87
+	 ld	[%i3], %g2
+
+	cmp	%g4, %g1
+L163:
+	bleu	L114
+	 mov	-1, %i0
+
+	b	L114
+	 mov	1, %i0
+L87:
+	ld	[%i5 + 4], %g4
+	cmp	%g3, %g2
+	bne	L163
+	 ld	[%i3 + 4], %g1
+L84:
+	ld	[%i5 + 8], %g3
+
+	cmp	%g4, %g1
+	bne	L163
+	 ld	[%i3 + 8], %g2
+L82:
+	ld	[%i5 + 12], %g4
+	cmp	%g3, %g2
+	bne	L163
+	 ld	[%i3 + 12], %g1
+
+	add	%i5, 16, %i5
+
+	addcc	%i4, -4, %i4
+	bne	L95
+	 add	%i3, 16, %i3
+
+	cmp	%g4, %g1
+	bne	L163
+	 nop
+
+	b	L114
+	 mov	0, %i0
+L78:
+	srl	%o4, 2, %i0
+	and	%o0, -4, %i3
+	orcc	%i0, %g0, %g3
+	sll	%g2, 3, %o7
+	mov	32, %g2
+
+	bge	L129
+	 sub	%g2, %o7, %o1
+
+	add	%i0, 3, %g3
+L129:
+	sra	%g3, 2, %g2
+	sll	%g2, 2, %g2
+	sub	%i0, %g2, %g2
+
+	cmp	%g2, 1
+	be,a	L124
+	 ld	[%i3], %o3
+
+	bg	L130
+	 cmp	%g2, 2
+
+	cmp	%g2, 0
+	be,a	L122
+	 ld	[%i3], %o2
+
+	b	L164
+	sll	%o3, %o7, %g3
+L130:
+	be	L117
+	 cmp	%g2, 3
+
+	be,a	L119
+	 ld	[%i3], %g1
+
+	b	L164
+	 sll	%o3, %o7, %g3
+L117:
+	ld	[%i3], %g4
+	add	%i2, -8, %i1
+	ld	[%i3 + 4], %o3
+	add	%i0, 2, %i0
+	ld	[%i2], %i4
+
+	b	L118
+	 add	%i3, -4, %i3
+L119:
+	ld	[%i3 + 4], %g4
+	add	%i2, -4, %i1
+	ld	[%i2], %i5
+
+	b	L120
+	 add	%i0, 1, %i0
+L122:
+	ld	[%i3 + 4], %g1
+	ld	[%i2], %i4
+
+	b	L123
+	 add	%i3, 4, %i3
+L124:
+	add	%i2, 4, %i1
+	ld	[%i3 + 4], %o2
+	add	%i0, -1, %i0
+	ld	[%i2], %i5
+	add	%i3, 8, %i3
+L131:
+	sll	%o3, %o7, %g3
+L164:
+	srl	%o2, %o1, %g2
+	ld	[%i3], %g1
+	or	%g3, %g2, %g3
+
+	cmp	%g3, %i5
+	bne	L163
+	 ld	[%i1], %i4
+L123:
+	sll	%o2, %o7, %g3
+	srl	%g1, %o1, %g2
+	ld	[%i3 + 4], %g4
+	or	%g3, %g2, %g3
+
+	cmp	%g3, %i4
+	bne	L163
+	 ld	[%i1 + 4], %i5
+L120:
+	sll	%g1, %o7, %g3
+	srl	%g4, %o1, %g2
+	ld	[%i3 + 8], %o3
+	or	%g3, %g2, %g3
+
+	cmp	%g3, %i5
+	bne	L163
+	 ld	[%i1 + 8], %i4
+L118:
+	sll	%g4, %o7, %g3
+	srl	%o3, %o1, %g2
+	ld	[%i3 + 12], %o2
+	or	%g3, %g2, %g3
+
+	cmp	%g3, %i4
+	bne	L163
+	 ld	[%i1 + 12], %i5
+
+	add	%i3, 16, %i3
+	addcc	%i0, -4, %i0
+	bne	L131
+	 add	%i1, 16, %i1
+
+	sll	%o3, %o7, %g3
+	srl	%o2, %o1, %g2
+	or	%g3, %g2, %g3
+
+	cmp	%g3, %i5
+	be,a	L114
+	 mov	0, %i0
+
+	b,a L163
+L114:
+	cmp	%i0, 0
+	bne	L156
+	 and	%o4, -4, %g2
+
+	add	%o0, %g2, %o0
+	add	%i2, %g2, %i2
+	and	%o4, 3, %o4
+L72:
+	cmp	%o4, 0
+	be	L156
+	 mov	0, %i0
+
+	ldub	[%o0], %g3
+L165:
+	ldub	[%i2], %g2
+	add	%o0, 1, %o0
+
+	subcc	%g3, %g2, %i0
+	bne	L156
+	 add	%i2, 1, %i2
+
+	addcc	%o4, -1, %o4
+	bne,a	L165
+	 ldub	[%o0], %g3
+
+	mov	0, %i0
+L156:
+	ret
+	restore
+#endif
diff --git a/arch/sparc/lib/memcpy.S b/arch/sparc/lib/memcpy.S
new file mode 100644
index 0000000..ce10bc8
--- /dev/null
+++ b/arch/sparc/lib/memcpy.S
@@ -0,0 +1,1150 @@
+/* memcpy.S: Sparc optimized memcpy and memmove code
+ * Hand optimized from GNU libc's memcpy and memmove
+ * Copyright (C) 1991,1996 Free Software Foundation
+ * Copyright (C) 1995 Linus Torvalds (Linus.Torvalds@helsinki.fi)
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
+ * Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#ifdef __KERNEL__
+
+#define FUNC(x) 											\
+	.globl	x;		\
+	.type	x,@function;	\
+	.align	4;											\
+x:
+
+#undef FASTER_REVERSE
+#undef FASTER_NONALIGNED
+#define FASTER_ALIGNED
+
+/* In the kernel these functions don't return a value;
+ * one should use the macros in asm/string.h for that purpose.
+ * We return 0 so that bugs are more apparent.
+ */
+#define SETUP_RETL
+#define RETL_INSN	clr	%o0
+
+#else
+
+/* libc */
+
+#include "DEFS.h"
+
+#define FASTER_REVERSE
+#define FASTER_NONALIGNED
+#define FASTER_ALIGNED
+
+#define SETUP_RETL	mov	%o0, %g6
+#define RETL_INSN	mov	%g6, %o0
+
+#endif
+
+/* Both these macros have to start with exactly the same insn */
+#define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
+	ldd	[%src + (offset) + 0x00], %t0; \
+	ldd	[%src + (offset) + 0x08], %t2; \
+	ldd	[%src + (offset) + 0x10], %t4; \
+	ldd	[%src + (offset) + 0x18], %t6; \
+	st	%t0, [%dst + (offset) + 0x00]; \
+	st	%t1, [%dst + (offset) + 0x04]; \
+	st	%t2, [%dst + (offset) + 0x08]; \
+	st	%t3, [%dst + (offset) + 0x0c]; \
+	st	%t4, [%dst + (offset) + 0x10]; \
+	st	%t5, [%dst + (offset) + 0x14]; \
+	st	%t6, [%dst + (offset) + 0x18]; \
+	st	%t7, [%dst + (offset) + 0x1c];
+
+#define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
+	ldd	[%src + (offset) + 0x00], %t0; \
+	ldd	[%src + (offset) + 0x08], %t2; \
+	ldd	[%src + (offset) + 0x10], %t4; \
+	ldd	[%src + (offset) + 0x18], %t6; \
+	std	%t0, [%dst + (offset) + 0x00]; \
+	std	%t2, [%dst + (offset) + 0x08]; \
+	std	%t4, [%dst + (offset) + 0x10]; \
+	std	%t6, [%dst + (offset) + 0x18];
+
+#define MOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
+	ldd	[%src - (offset) - 0x10], %t0; \
+	ldd	[%src - (offset) - 0x08], %t2; \
+	st	%t0, [%dst - (offset) - 0x10]; \
+	st	%t1, [%dst - (offset) - 0x0c]; \
+	st	%t2, [%dst - (offset) - 0x08]; \
+	st	%t3, [%dst - (offset) - 0x04];
+
+#define MOVE_LASTALIGNCHUNK(src, dst, offset, t0, t1, t2, t3) \
+	ldd	[%src - (offset) - 0x10], %t0; \
+	ldd	[%src - (offset) - 0x08], %t2; \
+	std	%t0, [%dst - (offset) - 0x10]; \
+	std	%t2, [%dst - (offset) - 0x08];
+
+#define MOVE_SHORTCHUNK(src, dst, offset, t0, t1) \
+	ldub	[%src - (offset) - 0x02], %t0; \
+	ldub	[%src - (offset) - 0x01], %t1; \
+	stb	%t0, [%dst - (offset) - 0x02]; \
+	stb	%t1, [%dst - (offset) - 0x01];
+
+/* Both these macros have to start with exactly the same insn */
+#define RMOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
+	ldd	[%src - (offset) - 0x20], %t0; \
+	ldd	[%src - (offset) - 0x18], %t2; \
+	ldd	[%src - (offset) - 0x10], %t4; \
+	ldd	[%src - (offset) - 0x08], %t6; \
+	st	%t0, [%dst - (offset) - 0x20]; \
+	st	%t1, [%dst - (offset) - 0x1c]; \
+	st	%t2, [%dst - (offset) - 0x18]; \
+	st	%t3, [%dst - (offset) - 0x14]; \
+	st	%t4, [%dst - (offset) - 0x10]; \
+	st	%t5, [%dst - (offset) - 0x0c]; \
+	st	%t6, [%dst - (offset) - 0x08]; \
+	st	%t7, [%dst - (offset) - 0x04];
+
+#define RMOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
+	ldd	[%src - (offset) - 0x20], %t0; \
+	ldd	[%src - (offset) - 0x18], %t2; \
+	ldd	[%src - (offset) - 0x10], %t4; \
+	ldd	[%src - (offset) - 0x08], %t6; \
+	std	%t0, [%dst - (offset) - 0x20]; \
+	std	%t2, [%dst - (offset) - 0x18]; \
+	std	%t4, [%dst - (offset) - 0x10]; \
+	std	%t6, [%dst - (offset) - 0x08];
+
+#define RMOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
+	ldd	[%src + (offset) + 0x00], %t0; \
+	ldd	[%src + (offset) + 0x08], %t2; \
+	st	%t0, [%dst + (offset) + 0x00]; \
+	st	%t1, [%dst + (offset) + 0x04]; \
+	st	%t2, [%dst + (offset) + 0x08]; \
+	st	%t3, [%dst + (offset) + 0x0c];
+
+#define RMOVE_SHORTCHUNK(src, dst, offset, t0, t1) \
+	ldub	[%src + (offset) + 0x00], %t0; \
+	ldub	[%src + (offset) + 0x01], %t1; \
+	stb	%t0, [%dst + (offset) + 0x00]; \
+	stb	%t1, [%dst + (offset) + 0x01];
+
+#define SMOVE_CHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, prev, shil, shir, offset2) \
+	ldd	[%src + (offset) + 0x00], %t0; \
+	ldd	[%src + (offset) + 0x08], %t2; \
+	srl	%t0, shir, %t5; \
+	srl	%t1, shir, %t6; \
+	sll	%t0, shil, %t0; \
+	or	%t5, %prev, %t5; \
+	sll	%t1, shil, %prev; \
+	or	%t6, %t0, %t0; \
+	srl	%t2, shir, %t1; \
+	srl	%t3, shir, %t6; \
+	sll	%t2, shil, %t2; \
+	or	%t1, %prev, %t1; \
+	std	%t4, [%dst + (offset) + (offset2) - 0x04]; \
+	std	%t0, [%dst + (offset) + (offset2) + 0x04]; \
+	sll	%t3, shil, %prev; \
+	or	%t6, %t2, %t4;
+
+#define SMOVE_ALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, prev, shil, shir, offset2) \
+	ldd	[%src + (offset) + 0x00], %t0; \
+	ldd	[%src + (offset) + 0x08], %t2; \
+	srl	%t0, shir, %t4;	\
+	srl	%t1, shir, %t5;	\
+	sll	%t0, shil, %t6;	\
+	or	%t4, %prev, %t0; \
+	sll	%t1, shil, %prev; \
+	or	%t5, %t6, %t1; \
+	srl	%t2, shir, %t4;	\
+	srl	%t3, shir, %t5;	\
+	sll	%t2, shil, %t6; \
+	or	%t4, %prev, %t2; \
+	sll	%t3, shil, %prev; \
+	or	%t5, %t6, %t3; \
+	std	%t0, [%dst + (offset) + (offset2) + 0x00]; \
+	std	%t2, [%dst + (offset) + (offset2) + 0x08];
+
+	.text
+	.align	4
+
+#ifdef FASTER_REVERSE
+
+70:	/* rdword_align */
+
+	andcc		%o1, 1, %g0
+	be		4f
+	 andcc		%o1, 2, %g0
+
+	ldub		[%o1 - 1], %g2
+	sub		%o1, 1, %o1
+	stb		%g2, [%o0 - 1]
+	sub		%o2, 1, %o2
+	be		3f
+	 sub		%o0, 1, %o0
+4:
+	lduh		[%o1 - 2], %g2
+	sub		%o1, 2, %o1
+	sth		%g2, [%o0 - 2]
+	sub		%o2, 2, %o2
+	b		3f
+	 sub		%o0, 2, %o0
+
+#endif /* FASTER_REVERSE */
+
+0:
+	retl
+	 nop		! Only bcopy returns here and it returns void...
+
+#ifdef __KERNEL__
+FUNC(amemmove)
+FUNC(__memmove)
+#endif
+FUNC(memmove)
+	cmp		%o0, %o1
+	SETUP_RETL
+	bleu		9f
+	 sub		%o0, %o1, %o4
+
+	add		%o1, %o2, %o3
+	cmp		%o3, %o0
+	bleu		0f
+	 andcc		%o4, 3, %o5
+
+#ifndef FASTER_REVERSE
+
+	add		%o1, %o2, %o1
+	add		%o0, %o2, %o0
+	sub		%o1, 1, %o1
+	sub		%o0, 1, %o0
+	
+1:	/* reverse_bytes */
+
+	ldub		[%o1], %o4
+	subcc		%o2, 1, %o2
+	stb		%o4, [%o0]
+	sub		%o1, 1, %o1
+	bne		1b
+	 sub		%o0, 1, %o0
+
+	retl
+	 RETL_INSN
+
+#else /* FASTER_REVERSE */
+
+	add		%o1, %o2, %o1
+	add		%o0, %o2, %o0
+	bne		77f
+	 cmp		%o2, 15
+	bleu		91f
+	 andcc		%o1, 3, %g0
+	bne		70b
+3:
+	 andcc		%o1, 4, %g0
+
+	be		2f
+	 mov		%o2, %g1
+
+	ld		[%o1 - 4], %o4
+	sub		%g1, 4, %g1
+	st		%o4, [%o0 - 4]
+	sub		%o1, 4, %o1
+	sub		%o0, 4, %o0
+2:
+	andcc		%g1, 0xffffff80, %g7
+	be		3f
+	 andcc		%o0, 4, %g0
+
+	be		74f + 4
+5:
+	RMOVE_BIGCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
+	RMOVE_BIGCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
+	RMOVE_BIGCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
+	RMOVE_BIGCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
+	subcc		%g7, 128, %g7
+	sub		%o1, 128, %o1
+	bne		5b
+	 sub		%o0, 128, %o0
+3:
+	andcc		%g1, 0x70, %g7
+	be		72f
+	 andcc		%g1, 8, %g0
+
+	sethi		%hi(72f), %o5
+	srl		%g7, 1, %o4
+	add		%g7, %o4, %o4
+	sub		%o1, %g7, %o1
+	sub		%o5, %o4, %o5
+	jmpl		%o5 + %lo(72f), %g0
+	 sub		%o0, %g7, %o0
+
+71:	/* rmemcpy_table */
+	RMOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g4, g5)
+	RMOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g4, g5)
+	RMOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g4, g5)
+	RMOVE_LASTCHUNK(o1, o0, 0x30, g2, g3, g4, g5)
+	RMOVE_LASTCHUNK(o1, o0, 0x20, g2, g3, g4, g5)
+	RMOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g4, g5)
+	RMOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g4, g5)
+
+72:	/* rmemcpy_table_end */
+
+	be		73f
+	 andcc		%g1, 4, %g0
+
+	ldd		[%o1 - 0x08], %g2
+	sub		%o0, 8, %o0
+	sub		%o1, 8, %o1
+	st		%g2, [%o0]
+	st		%g3, [%o0 + 0x04]
+
+73:	/* rmemcpy_last7 */
+
+	be		1f
+	 andcc		%g1, 2, %g0
+
+	ld		[%o1 - 4], %g2
+	sub		%o1, 4, %o1
+	st		%g2, [%o0 - 4]
+	sub		%o0, 4, %o0
+1:
+	be		1f
+	 andcc		%g1, 1, %g0
+
+	lduh		[%o1 - 2], %g2
+	sub		%o1, 2, %o1
+	sth		%g2, [%o0 - 2]
+	sub		%o0, 2, %o0
+1:
+	be		1f
+	 nop
+
+	ldub		[%o1 - 1], %g2
+	stb		%g2, [%o0 - 1]
+1:
+	retl
+ 	 RETL_INSN
+
+74:	/* rldd_std */
+	RMOVE_BIGALIGNCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
+	RMOVE_BIGALIGNCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
+	RMOVE_BIGALIGNCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
+	RMOVE_BIGALIGNCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
+	subcc		%g7, 128, %g7
+	sub		%o1, 128, %o1
+	bne		74b
+	 sub		%o0, 128, %o0
+
+	andcc		%g1, 0x70, %g7
+	be		72b
+	 andcc		%g1, 8, %g0
+
+	sethi		%hi(72b), %o5
+	srl		%g7, 1, %o4
+	add		%g7, %o4, %o4
+	sub		%o1, %g7, %o1
+	sub		%o5, %o4, %o5
+	jmpl		%o5 + %lo(72b), %g0
+	 sub		%o0, %g7, %o0
+
+75:	/* rshort_end */
+
+	and		%o2, 0xe, %o3
+2:
+	sethi		%hi(76f), %o5
+	sll		%o3, 3, %o4
+	sub		%o0, %o3, %o0
+	sub		%o5, %o4, %o5
+	sub		%o1, %o3, %o1
+	jmpl		%o5 + %lo(76f), %g0
+	 andcc		%o2, 1, %g0
+
+	RMOVE_SHORTCHUNK(o1, o0, 0x0c, g2, g3)
+	RMOVE_SHORTCHUNK(o1, o0, 0x0a, g2, g3)
+	RMOVE_SHORTCHUNK(o1, o0, 0x08, g2, g3)
+	RMOVE_SHORTCHUNK(o1, o0, 0x06, g2, g3)
+	RMOVE_SHORTCHUNK(o1, o0, 0x04, g2, g3)
+	RMOVE_SHORTCHUNK(o1, o0, 0x02, g2, g3)
+	RMOVE_SHORTCHUNK(o1, o0, 0x00, g2, g3)
+
+76:	/* rshort_table_end */
+
+	be		1f
+	 nop
+	ldub		[%o1 - 1], %g2
+	stb		%g2, [%o0 - 1]
+1:
+	retl
+ 	 RETL_INSN
+
+91:	/* rshort_aligned_end */
+
+	bne		75b
+	 andcc		%o2, 8, %g0
+
+	be		1f
+	 andcc		%o2, 4, %g0
+
+	ld		[%o1 - 0x08], %g2
+	ld		[%o1 - 0x04], %g3
+	sub		%o1, 8, %o1
+	st		%g2, [%o0 - 0x08]
+	st		%g3, [%o0 - 0x04]
+	sub		%o0, 8, %o0
+1:
+	b		73b
+	 mov		%o2, %g1
+
+77:	/* rnon_aligned */
+	cmp		%o2, 15
+	bleu		75b
+	 andcc		%o0, 3, %g0
+	be		64f
+	 andcc		%o0, 1, %g0
+	be		63f
+	 andcc		%o0, 2, %g0
+	ldub		[%o1 - 1], %g5
+	sub		%o1, 1, %o1
+	stb		%g5, [%o0 - 1]
+	sub		%o0, 1, %o0
+	be		64f
+	 sub		%o2, 1, %o2
+63:
+	ldub		[%o1 - 1], %g5
+	sub		%o1, 2, %o1
+	stb		%g5, [%o0 - 1]
+	sub		%o0, 2, %o0
+	ldub		[%o1], %g5
+	sub		%o2, 2, %o2
+	stb		%g5, [%o0]
+64:	
+	and		%o1, 3, %g2
+	and		%o1, -4, %o1
+	and		%o2, 0xc, %g3
+	add		%o1, 4, %o1
+	cmp		%g3, 4
+	sll		%g2, 3, %g4
+	mov		32, %g2
+	be		4f
+	 sub		%g2, %g4, %g7
+
+	blu		3f
+	 cmp		%g3, 8
+
+	be		2f
+	 srl		%o2, 2, %g3
+
+	ld		[%o1 - 4], %o3
+	add		%o0, -8, %o0
+	ld		[%o1 - 8], %o4
+	add		%o1, -16, %o1
+	b		7f
+	 add		%g3, 1, %g3
+2:
+	ld		[%o1 - 4], %o4
+	add		%o0, -4, %o0
+	ld		[%o1 - 8], %g1
+	add		%o1, -12, %o1
+	b		8f
+	 add		%g3, 2, %g3
+3:
+	ld		[%o1 - 4], %o5
+	add		%o0, -12, %o0
+	ld		[%o1 - 8], %o3
+	add		%o1, -20, %o1
+	b		6f
+	 srl		%o2, 2, %g3
+4:
+	ld		[%o1 - 4], %g1
+	srl		%o2, 2, %g3
+	ld		[%o1 - 8], %o5
+	add		%o1, -24, %o1
+	add		%o0, -16, %o0
+	add		%g3, -1, %g3
+
+	ld		[%o1 + 12], %o3
+5:
+	sll		%o5, %g4, %g2
+	srl		%g1, %g7, %g5
+	or		%g2, %g5, %g2
+	st		%g2, [%o0 + 12]
+6:
+	ld		[%o1 + 8], %o4
+	sll		%o3, %g4, %g2
+	srl		%o5, %g7, %g5
+	or		%g2, %g5, %g2
+	st		%g2, [%o0 + 8]
+7:
+	ld		[%o1 + 4], %g1
+	sll		%o4, %g4, %g2
+	srl		%o3, %g7, %g5
+	or		%g2, %g5, %g2
+	st		%g2, [%o0 + 4]
+8:
+	ld		[%o1], %o5
+	sll		%g1, %g4, %g2
+	srl		%o4, %g7, %g5
+	addcc		%g3, -4, %g3
+	or		%g2, %g5, %g2
+	add		%o1, -16, %o1
+	st		%g2, [%o0]
+	add		%o0, -16, %o0
+	bne,a		5b	
+	 ld		[%o1 + 12], %o3
+	sll		%o5, %g4, %g2
+	srl		%g1, %g7, %g5
+	srl		%g4, 3, %g3
+	or		%g2, %g5, %g2
+	add		%o1, %g3, %o1
+	andcc		%o2, 2, %g0
+	st		%g2, [%o0 + 12]
+	be		1f
+	 andcc		%o2, 1, %g0
+	
+	ldub		[%o1 + 15], %g5
+	add		%o1, -2, %o1
+	stb		%g5, [%o0 + 11]
+	add		%o0, -2, %o0
+	ldub		[%o1 + 16], %g5
+	stb		%g5, [%o0 + 12]
+1:
+	be		1f
+	 nop
+	ldub		[%o1 + 15], %g5
+	stb		%g5, [%o0 + 11]
+1:
+	retl
+	 RETL_INSN
+
+#endif /* FASTER_REVERSE */
+
+/* NOTE: This code is executed only for the cases
+         where (%src (=%o1) & 3) != 0.
+	 We need to align it to 4. So, for (%src & 3):
+	 1 we need to do ldub,lduh
+	 2 lduh
+	 3 just ldub
+         so even if it looks weird, the branches
+         are correct here. -jj
+ */
+78:	/* dword_align */
+
+	andcc		%o1, 1, %g0
+	be		4f
+	 andcc		%o1, 2, %g0
+
+	ldub		[%o1], %g2
+	add		%o1, 1, %o1
+	stb		%g2, [%o0]
+	sub		%o2, 1, %o2
+	bne		3f
+	 add		%o0, 1, %o0
+4:
+	lduh		[%o1], %g2
+	add		%o1, 2, %o1
+	sth		%g2, [%o0]
+	sub		%o2, 2, %o2
+	b		3f
+	 add		%o0, 2, %o0
+
+#ifdef __KERNEL__
+FUNC(__memcpy)
+#endif
+FUNC(memcpy)	/* %o0=dst %o1=src %o2=len */
+
+	sub		%o0, %o1, %o4
+	SETUP_RETL
+9:
+	andcc		%o4, 3, %o5
+0:
+	bne		86f
+	 cmp		%o2, 15
+
+	bleu		90f
+	 andcc		%o1, 3, %g0
+
+	bne		78b
+3:
+	 andcc		%o1, 4, %g0
+
+	be		2f
+	 mov		%o2, %g1
+
+	ld		[%o1], %o4
+	sub		%g1, 4, %g1
+	st		%o4, [%o0]
+	add		%o1, 4, %o1
+	add		%o0, 4, %o0
+2:
+	andcc		%g1, 0xffffff80, %g7
+	be		3f
+	 andcc		%o0, 4, %g0
+
+	be		82f + 4
+5:
+	MOVE_BIGCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
+	MOVE_BIGCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
+	MOVE_BIGCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
+	MOVE_BIGCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
+	subcc		%g7, 128, %g7
+	add		%o1, 128, %o1
+	bne		5b
+	 add		%o0, 128, %o0
+3:
+	andcc		%g1, 0x70, %g7
+	be		80f
+	 andcc		%g1, 8, %g0
+
+	sethi		%hi(80f), %o5
+	srl		%g7, 1, %o4
+	add		%g7, %o4, %o4
+	add		%o1, %g7, %o1
+	sub		%o5, %o4, %o5
+	jmpl		%o5 + %lo(80f), %g0
+	 add		%o0, %g7, %o0
+
+79:	/* memcpy_table */
+
+	MOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g4, g5)
+	MOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g4, g5)
+	MOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g4, g5)
+	MOVE_LASTCHUNK(o1, o0, 0x30, g2, g3, g4, g5)
+	MOVE_LASTCHUNK(o1, o0, 0x20, g2, g3, g4, g5)
+	MOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g4, g5)
+	MOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g4, g5)
+
+80:	/* memcpy_table_end */
+	be		81f
+	 andcc		%g1, 4, %g0
+
+	ldd		[%o1], %g2
+	add		%o0, 8, %o0
+	st		%g2, [%o0 - 0x08]
+	add		%o1, 8, %o1
+	st		%g3, [%o0 - 0x04]
+
+81:	/* memcpy_last7 */
+
+	be		1f
+	 andcc		%g1, 2, %g0
+
+	ld		[%o1], %g2
+	add		%o1, 4, %o1
+	st		%g2, [%o0]
+	add		%o0, 4, %o0
+1:
+	be		1f
+	 andcc		%g1, 1, %g0
+
+	lduh		[%o1], %g2
+	add		%o1, 2, %o1
+	sth		%g2, [%o0]
+	add		%o0, 2, %o0
+1:
+	be		1f
+	 nop
+
+	ldub		[%o1], %g2
+	stb		%g2, [%o0]
+1:
+	retl
+ 	 RETL_INSN
+
+82:	/* ldd_std */
+	MOVE_BIGALIGNCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
+	MOVE_BIGALIGNCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
+	MOVE_BIGALIGNCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
+	MOVE_BIGALIGNCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
+	subcc		%g7, 128, %g7
+	add		%o1, 128, %o1
+	bne		82b
+	 add		%o0, 128, %o0
+
+#ifndef FASTER_ALIGNED
+
+	andcc		%g1, 0x70, %g7
+	be		80b
+	 andcc		%g1, 8, %g0
+
+	sethi		%hi(80b), %o5
+	srl		%g7, 1, %o4
+	add		%g7, %o4, %o4
+	add		%o1, %g7, %o1
+	sub		%o5, %o4, %o5
+	jmpl		%o5 + %lo(80b), %g0
+	 add		%o0, %g7, %o0
+
+#else /* FASTER_ALIGNED */
+
+	andcc		%g1, 0x70, %g7
+	be		84f
+	 andcc		%g1, 8, %g0
+
+	sethi		%hi(84f), %o5
+	add		%o1, %g7, %o1
+	sub		%o5, %g7, %o5
+	jmpl		%o5 + %lo(84f), %g0
+	 add		%o0, %g7, %o0
+
+83:	/* amemcpy_table */
+
+	MOVE_LASTALIGNCHUNK(o1, o0, 0x60, g2, g3, g4, g5)
+	MOVE_LASTALIGNCHUNK(o1, o0, 0x50, g2, g3, g4, g5)
+	MOVE_LASTALIGNCHUNK(o1, o0, 0x40, g2, g3, g4, g5)
+	MOVE_LASTALIGNCHUNK(o1, o0, 0x30, g2, g3, g4, g5)
+	MOVE_LASTALIGNCHUNK(o1, o0, 0x20, g2, g3, g4, g5)
+	MOVE_LASTALIGNCHUNK(o1, o0, 0x10, g2, g3, g4, g5)
+	MOVE_LASTALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5)
+
+84:	/* amemcpy_table_end */
+	be		85f
+	 andcc		%g1, 4, %g0
+
+	ldd		[%o1], %g2
+	add		%o0, 8, %o0
+	std		%g2, [%o0 - 0x08]
+	add		%o1, 8, %o1
+85:	/* amemcpy_last7 */
+	be		1f
+	 andcc		%g1, 2, %g0
+
+	ld		[%o1], %g2
+	add		%o1, 4, %o1
+	st		%g2, [%o0]
+	add		%o0, 4, %o0
+1:
+	be		1f
+	 andcc		%g1, 1, %g0
+
+	lduh		[%o1], %g2
+	add		%o1, 2, %o1
+	sth		%g2, [%o0]
+	add		%o0, 2, %o0
+1:
+	be		1f
+	 nop
+
+	ldub		[%o1], %g2
+	stb		%g2, [%o0]
+1:
+	retl
+ 	 RETL_INSN
+
+#endif /* FASTER_ALIGNED */
+
+86:	/* non_aligned */
+	cmp		%o2, 6
+	bleu		88f
+
+#ifdef FASTER_NONALIGNED
+
+	 cmp		%o2, 256
+	bcc		87f
+
+#endif /* FASTER_NONALIGNED */
+
+	 andcc		%o0, 3, %g0
+	be		61f
+	 andcc		%o0, 1, %g0
+	be		60f
+	 andcc		%o0, 2, %g0
+
+	ldub		[%o1], %g5
+	add		%o1, 1, %o1
+	stb		%g5, [%o0]
+	sub		%o2, 1, %o2
+	bne		61f
+	 add		%o0, 1, %o0
+60:
+	ldub		[%o1], %g3
+	add		%o1, 2, %o1
+	stb		%g3, [%o0]
+	sub		%o2, 2, %o2
+	ldub		[%o1 - 1], %g3
+	add		%o0, 2, %o0
+	stb		%g3, [%o0 - 1]
+61:
+	and		%o1, 3, %g2
+	and		%o2, 0xc, %g3
+	and		%o1, -4, %o1
+	cmp		%g3, 4
+	sll		%g2, 3, %g4
+	mov		32, %g2
+	be		4f
+	 sub		%g2, %g4, %g7
+	
+	blu		3f
+	 cmp		%g3, 0x8
+
+	be		2f
+	 srl		%o2, 2, %g3
+
+	ld		[%o1], %o3
+	add		%o0, -8, %o0
+	ld		[%o1 + 4], %o4
+	b		8f
+	 add		%g3, 1, %g3
+2:
+	ld		[%o1], %o4
+	add		%o0, -12, %o0
+	ld		[%o1 + 4], %o5
+	add		%g3, 2, %g3
+	b		9f
+	 add		%o1, -4, %o1
+3:
+	ld		[%o1], %g1
+	add		%o0, -4, %o0
+	ld		[%o1 + 4], %o3
+	srl		%o2, 2, %g3
+	b		7f
+	 add		%o1, 4, %o1
+4:
+	ld		[%o1], %o5
+	cmp		%o2, 7
+	ld		[%o1 + 4], %g1
+	srl		%o2, 2, %g3
+	bleu		10f
+	 add		%o1, 8, %o1
+
+	ld		[%o1], %o3
+	add		%g3, -1, %g3
+5:
+	sll		%o5, %g4, %g2
+	srl		%g1, %g7, %g5
+	or		%g2, %g5, %g2
+	st		%g2, [%o0]
+7:
+	ld		[%o1 + 4], %o4
+	sll		%g1, %g4, %g2
+	srl		%o3, %g7, %g5
+	or		%g2, %g5, %g2
+	st		%g2, [%o0 + 4]
+8:
+	ld		[%o1 + 8], %o5
+	sll		%o3, %g4, %g2
+	srl		%o4, %g7, %g5
+	or		%g2, %g5, %g2
+	st		%g2, [%o0 + 8]
+9:
+	ld		[%o1 + 12], %g1
+	sll		%o4, %g4, %g2
+	srl		%o5, %g7, %g5
+	addcc		%g3, -4, %g3
+	or		%g2, %g5, %g2
+	add		%o1, 16, %o1
+	st		%g2, [%o0 + 12]
+	add		%o0, 16, %o0
+	bne,a		5b
+	 ld		[%o1], %o3
+10:
+	sll		%o5, %g4, %g2
+	srl		%g1, %g7, %g5
+	srl		%g7, 3, %g3
+	or		%g2, %g5, %g2
+	sub		%o1, %g3, %o1
+	andcc		%o2, 2, %g0
+	st		%g2, [%o0]
+	be		1f
+	 andcc		%o2, 1, %g0
+
+	ldub		[%o1], %g2
+	add		%o1, 2, %o1
+	stb		%g2, [%o0 + 4]
+	add		%o0, 2, %o0
+	ldub		[%o1 - 1], %g2
+	stb		%g2, [%o0 + 3]
+1:
+	be		1f
+	 nop
+	ldub		[%o1], %g2
+	stb		%g2, [%o0 + 4]
+1:
+	retl
+	 RETL_INSN
+
+#ifdef FASTER_NONALIGNED
+
+87:	/* faster_nonaligned */
+
+	andcc		%o1, 3, %g0
+	be		3f
+	 andcc		%o1, 1, %g0
+
+	be		4f
+	 andcc		%o1, 2, %g0
+
+	ldub		[%o1], %g2
+	add		%o1, 1, %o1
+	stb		%g2, [%o0]
+	sub		%o2, 1, %o2
+	bne		3f
+	 add		%o0, 1, %o0
+4:
+	lduh		[%o1], %g2
+	add		%o1, 2, %o1
+	srl		%g2, 8, %g3
+	sub		%o2, 2, %o2
+	stb		%g3, [%o0]
+	add		%o0, 2, %o0
+	stb		%g2, [%o0 - 1]
+3:
+	 andcc		%o1, 4, %g0
+
+	bne		2f
+	 cmp		%o5, 1
+
+	ld		[%o1], %o4
+	srl		%o4, 24, %g2
+	stb		%g2, [%o0]
+	srl		%o4, 16, %g3
+	stb		%g3, [%o0 + 1]
+	srl		%o4, 8, %g2
+	stb		%g2, [%o0 + 2]
+	sub		%o2, 4, %o2
+	stb		%o4, [%o0 + 3]
+	add		%o1, 4, %o1
+	add		%o0, 4, %o0
+2:
+	be		33f
+	 cmp		%o5, 2
+	be		32f
+	 sub		%o2, 4, %o2
+31:
+	ld		[%o1], %g2
+	add		%o1, 4, %o1
+	srl		%g2, 24, %g3
+	and		%o0, 7, %g5
+	stb		%g3, [%o0]
+	cmp		%g5, 7
+	sll		%g2, 8, %g1
+	add		%o0, 4, %o0
+	be		41f
+	 and		%o2, 0xffffffc0, %o3
+	ld		[%o0 - 7], %o4
+4:
+	SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
+	SMOVE_CHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
+	SMOVE_CHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
+	SMOVE_CHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
+	subcc		%o3, 64, %o3
+	add		%o1, 64, %o1
+	bne		4b
+	 add		%o0, 64, %o0
+
+	andcc		%o2, 0x30, %o3
+	be,a		1f
+	 srl		%g1, 16, %g2
+4:
+	SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
+	subcc		%o3, 16, %o3
+	add		%o1, 16, %o1
+	bne		4b
+	 add		%o0, 16, %o0
+
+	srl		%g1, 16, %g2
+1:
+	st		%o4, [%o0 - 7]
+	sth		%g2, [%o0 - 3]
+	srl		%g1, 8, %g4
+	b		88f
+	 stb		%g4, [%o0 - 1]
+32:
+	ld		[%o1], %g2
+	add		%o1, 4, %o1
+	srl		%g2, 16, %g3
+	and		%o0, 7, %g5
+	sth		%g3, [%o0]
+	cmp		%g5, 6
+	sll		%g2, 16, %g1
+	add		%o0, 4, %o0
+	be		42f
+	 and		%o2, 0xffffffc0, %o3
+	ld		[%o0 - 6], %o4
+4:
+	SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
+	SMOVE_CHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
+	SMOVE_CHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
+	SMOVE_CHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
+	subcc		%o3, 64, %o3
+	add		%o1, 64, %o1
+	bne		4b
+	 add		%o0, 64, %o0
+
+	andcc		%o2, 0x30, %o3
+	be,a		1f
+	 srl		%g1, 16, %g2
+4:
+	SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
+	subcc		%o3, 16, %o3
+	add		%o1, 16, %o1
+	bne		4b
+	 add		%o0, 16, %o0
+
+	srl		%g1, 16, %g2
+1:
+	st		%o4, [%o0 - 6]
+	b		88f
+	 sth		%g2, [%o0 - 2]
+33:
+	ld		[%o1], %g2
+	sub		%o2, 4, %o2
+	srl		%g2, 24, %g3
+	and		%o0, 7, %g5
+	stb		%g3, [%o0]
+	cmp		%g5, 5
+	srl		%g2, 8, %g4
+	sll		%g2, 24, %g1
+	sth		%g4, [%o0 + 1]
+	add		%o1, 4, %o1
+	be		43f
+	 and		%o2, 0xffffffc0, %o3
+
+	ld		[%o0 - 1], %o4
+	add		%o0, 4, %o0
+4:
+	SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, -1)
+	SMOVE_CHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, -1)
+	SMOVE_CHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, -1)
+	SMOVE_CHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, -1)
+	subcc		%o3, 64, %o3
+	add		%o1, 64, %o1
+	bne		4b
+	 add		%o0, 64, %o0
+
+	andcc		%o2, 0x30, %o3
+	be,a		1f
+	 srl		%g1, 24, %g2
+4:
+	SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, -1)
+	subcc		%o3, 16, %o3
+	add		%o1, 16, %o1
+	bne		4b
+	 add		%o0, 16, %o0
+
+	srl		%g1, 24, %g2
+1:
+	st		%o4, [%o0 - 5]
+	b		88f
+	 stb		%g2, [%o0 - 1]
+41:
+	SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
+	SMOVE_ALIGNCHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
+	SMOVE_ALIGNCHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
+	SMOVE_ALIGNCHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
+	subcc		%o3, 64, %o3
+	add		%o1, 64, %o1
+	bne		41b
+	 add		%o0, 64, %o0
+	 
+	andcc		%o2, 0x30, %o3
+	be,a		1f
+	 srl		%g1, 16, %g2
+4:
+	SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
+	subcc		%o3, 16, %o3
+	add		%o1, 16, %o1
+	bne		4b
+	 add		%o0, 16, %o0
+
+	srl		%g1, 16, %g2
+1:
+	sth		%g2, [%o0 - 3]
+	srl		%g1, 8, %g4
+	b		88f
+	 stb		%g4, [%o0 - 1]
+43:
+	SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, 3)
+	SMOVE_ALIGNCHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, 3)
+	SMOVE_ALIGNCHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, 3)
+	SMOVE_ALIGNCHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, 3)
+	subcc		%o3, 64, %o3
+	add		%o1, 64, %o1
+	bne		43b
+	 add		%o0, 64, %o0
+
+	andcc		%o2, 0x30, %o3
+	be,a		1f
+	 srl		%g1, 24, %g2
+4:
+	SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, 3)
+	subcc		%o3, 16, %o3
+	add		%o1, 16, %o1
+	bne		4b
+	 add		%o0, 16, %o0
+
+	srl		%g1, 24, %g2
+1:
+	stb		%g2, [%o0 + 3]
+	b		88f
+	 add		%o0, 4, %o0
+42:
+	SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
+	SMOVE_ALIGNCHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
+	SMOVE_ALIGNCHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
+	SMOVE_ALIGNCHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
+	subcc		%o3, 64, %o3
+	add		%o1, 64, %o1
+	bne		42b
+	 add		%o0, 64, %o0
+	 
+	andcc		%o2, 0x30, %o3
+	be,a		1f
+	 srl		%g1, 16, %g2
+4:
+	SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
+	subcc		%o3, 16, %o3
+	add		%o1, 16, %o1
+	bne		4b
+	 add		%o0, 16, %o0
+
+	srl		%g1, 16, %g2
+1:
+	sth		%g2, [%o0 - 2]
+
+	/* Fall through */
+	 
+#endif /* FASTER_NONALIGNED */
+
+88:	/* short_end */
+
+	and		%o2, 0xe, %o3
+20:
+	sethi		%hi(89f), %o5
+	sll		%o3, 3, %o4
+	add		%o0, %o3, %o0
+	sub		%o5, %o4, %o5
+	add		%o1, %o3, %o1
+	jmpl		%o5 + %lo(89f), %g0
+	 andcc		%o2, 1, %g0
+
+	MOVE_SHORTCHUNK(o1, o0, 0x0c, g2, g3)
+	MOVE_SHORTCHUNK(o1, o0, 0x0a, g2, g3)
+	MOVE_SHORTCHUNK(o1, o0, 0x08, g2, g3)
+	MOVE_SHORTCHUNK(o1, o0, 0x06, g2, g3)
+	MOVE_SHORTCHUNK(o1, o0, 0x04, g2, g3)
+	MOVE_SHORTCHUNK(o1, o0, 0x02, g2, g3)
+	MOVE_SHORTCHUNK(o1, o0, 0x00, g2, g3)
+
+89:	/* short_table_end */
+
+	be		1f
+	 nop
+
+	ldub		[%o1], %g2
+	stb		%g2, [%o0]
+1:
+	retl
+ 	 RETL_INSN
+
+90:	/* short_aligned_end */
+	bne		88b
+	 andcc		%o2, 8, %g0
+
+	be		1f
+	 andcc		%o2, 4, %g0
+
+	ld		[%o1 + 0x00], %g2
+	ld		[%o1 + 0x04], %g3
+	add		%o1, 8, %o1
+	st		%g2, [%o0 + 0x00]
+	st		%g3, [%o0 + 0x04]
+	add		%o0, 8, %o0
+1:
+	b		81b
+	 mov		%o2, %g1
diff --git a/arch/sparc/lib/memscan.S b/arch/sparc/lib/memscan.S
new file mode 100644
index 0000000..28e78ff
--- /dev/null
+++ b/arch/sparc/lib/memscan.S
@@ -0,0 +1,133 @@
+/* $Id: memscan.S,v 1.4 1996/09/08 02:01:20 davem Exp $
+ * memscan.S: Optimized memscan for the Sparc.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+/* In essence, this is just a fancy strlen. */
+
+#define LO_MAGIC 0x01010101
+#define HI_MAGIC 0x80808080
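+/* The magic test used below flags a word that may contain a zero byte,
+ * e.g. 0x41420043 - LO_MAGIC = 0x4040ff42, and 0x4040ff42 & HI_MAGIC != 0.
+ * Bytes >= 0x80 can trip it as well, so the byte-by-byte checks afterwards
+ * make the final decision. (Example value is illustrative.)
+ */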
+
+	.text
+	.align	4
+	.globl	__memscan_zero, __memscan_generic
+	.globl	memscan
+__memscan_zero:
+	/* %o0 = addr, %o1 = size */
+	cmp	%o1, 0
+	bne,a	1f
+	 andcc	%o0, 3, %g0
+
+	retl
+	 nop
+
+1:
+	be	mzero_scan_word
+	 sethi	%hi(HI_MAGIC), %g2
+
+	ldsb	[%o0], %g3
+mzero_still_not_word_aligned:
+	cmp	%g3, 0
+	bne	1f
+	 add	%o0, 1, %o0
+
+	retl
+	 sub	%o0, 1, %o0
+
+1:
+	subcc	%o1, 1, %o1
+	bne,a	1f
+	 andcc	%o0, 3, %g0
+
+	retl
+	 nop
+
+1:
+	bne,a	mzero_still_not_word_aligned
+	 ldsb	[%o0], %g3
+
+	sethi	%hi(HI_MAGIC), %g2
+mzero_scan_word:
+	or	%g2, %lo(HI_MAGIC), %o3
+	sethi	%hi(LO_MAGIC), %g3
+	or	%g3, %lo(LO_MAGIC), %o2
+mzero_next_word:
+	ld	[%o0], %g2
+mzero_next_word_preloaded:
+	sub	%g2, %o2, %g2
+mzero_next_word_preloaded_next:
+	andcc	%g2, %o3, %g0
+	bne	mzero_byte_zero
+	 add	%o0, 4, %o0
+
+mzero_check_out_of_fuel:
+	subcc	%o1, 4, %o1
+	bg,a	1f
+	 ld	[%o0], %g2
+
+	retl
+	 nop
+
+1:
+	b	mzero_next_word_preloaded_next
+	 sub	%g2, %o2, %g2
+
+	/* Check every byte. */
+mzero_byte_zero:
+	ldsb	[%o0 - 4], %g2
+	cmp	%g2, 0
+	bne	mzero_byte_one
+	 sub	%o0, 4, %g3
+
+	retl
+	 mov	%g3, %o0
+
+mzero_byte_one:
+	ldsb	[%o0 - 3], %g2
+	cmp	%g2, 0
+	bne,a	mzero_byte_two_and_three
+	 ldsb	[%o0 - 2], %g2
+
+	retl
+	 sub	%o0, 3, %o0
+
+mzero_byte_two_and_three:
+	cmp	%g2, 0
+	bne,a	1f
+	 ldsb	[%o0 - 1], %g2
+
+	retl
+	 sub	%o0, 2, %o0
+
+1:
+	cmp	%g2, 0
+	bne,a	mzero_next_word_preloaded
+	 ld	[%o0], %g2
+
+	retl
+	 sub	%o0, 1, %o0
+
+mzero_found_it:
+	retl
+	 sub	%o0, 2, %o0
+
+memscan:
+__memscan_generic:
+	/* %o0 = addr, %o1 = c, %o2 = size */
+	cmp	%o2, 0
+	bne,a	0f
+	 ldub	[%o0], %g2
+
+	b,a	2f
+1:
+	ldub	[%o0], %g2
+0:
+	cmp	%g2, %o1
+	be	2f
+	 addcc	%o2, -1, %o2
+	bne	1b
+	 add	%o0, 1, %o0
+2:
+	retl
+	 nop
diff --git a/arch/sparc/lib/memset.S b/arch/sparc/lib/memset.S
new file mode 100644
index 0000000..a65eba4
--- /dev/null
+++ b/arch/sparc/lib/memset.S
@@ -0,0 +1,203 @@
+/* linux/arch/sparc/lib/memset.S: Sparc optimized memset, bzero and clear_user code
+ * Copyright (C) 1991,1996 Free Software Foundation
+ * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ *
+ * Returns 0, if ok, and number of bytes not yet set if exception
+ * occurs and we were called as clear_user.
+ */
+
+#include <asm/ptrace.h>
+
+/* Work around cpp -rob */
+#define ALLOC #alloc
+#define EXECINSTR #execinstr
+#define EX(x,y,a,b) 				\
+98: 	x,y;					\
+	.section .fixup,ALLOC,EXECINSTR;	\
+	.align	4;				\
+99:	ba 30f;					\
+	 a, b, %o0;				\
+	.section __ex_table,ALLOC;		\
+	.align	4;				\
+	.word	98b, 99b;			\
+	.text;					\
+	.align	4
+
+#define EXT(start,end,handler) 			\
+	.section __ex_table,ALLOC;		\
+	.align	4;				\
+	.word	start, 0, end, handler;		\
+	.text;					\
+	.align	4
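+
+/* Rough shape of what these macros emit, shown as C structs for
+ * reference (struct names are illustrative only).  The fault handler
+ * finds the faulting store in __ex_table and transfers control to the
+ * matching fixup, which works out how many bytes were left unset:
+ *
+ *	struct ex_single { unsigned long insn, fixup; };		// EX()
+ *	struct ex_range  { unsigned long start, zero, end, fixup; };	// EXT()
+ */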
+
+/* Please don't change these macros, unless you change the logic
+ * in the .fixup section below as well.
+ * Store 64 bytes at (BASE + OFFSET) using value SOURCE. */
+#define ZERO_BIG_BLOCK(base, offset, source)    \
+	std	source, [base + offset + 0x00]; \
+	std	source, [base + offset + 0x08]; \
+	std	source, [base + offset + 0x10]; \
+	std	source, [base + offset + 0x18]; \
+	std	source, [base + offset + 0x20]; \
+	std	source, [base + offset + 0x28]; \
+	std	source, [base + offset + 0x30]; \
+	std	source, [base + offset + 0x38];
+
+#define ZERO_LAST_BLOCKS(base, offset, source)	\
+	std	source, [base - offset - 0x38]; \
+	std	source, [base - offset - 0x30]; \
+	std	source, [base - offset - 0x28]; \
+	std	source, [base - offset - 0x20]; \
+	std	source, [base - offset - 0x18]; \
+	std	source, [base - offset - 0x10]; \
+	std	source, [base - offset - 0x08]; \
+	std	source, [base - offset - 0x00];
+
+	.text
+	.align 4
+
+        .globl  __bzero_begin
+__bzero_begin:
+
+	.globl	__bzero, __memset
+	.globl	memset
+	.globl	__memset_start, __memset_end
+__memset_start:
+__memset:
+memset:
+	and	%o1, 0xff, %g3
+	sll	%g3, 8, %g2
+	or	%g3, %g2, %g3
+	sll	%g3, 16, %g2
+	or	%g3, %g2, %g3
+	b	1f
+	 mov	%o2, %o1
+3:
+	cmp	%o2, 3
+	be	2f
+	 EX(stb	%g3, [%o0], sub %o1, 0)
+
+	cmp	%o2, 2
+	be	2f
+	 EX(stb	%g3, [%o0 + 0x01], sub %o1, 1)
+
+	EX(stb	%g3, [%o0 + 0x02], sub %o1, 2)
+2:
+	sub	%o2, 4, %o2
+	add	%o1, %o2, %o1
+	b	4f
+	 sub	%o0, %o2, %o0
+
+__bzero:
+	mov	%g0, %g3
+1:
+	cmp	%o1, 7
+	bleu	7f
+	 andcc	%o0, 3, %o2
+
+	bne	3b
+4:
+	 andcc	%o0, 4, %g0
+
+	be	2f
+	 mov	%g3, %g2
+
+	EX(st	%g3, [%o0], sub %o1, 0)
+	sub	%o1, 4, %o1
+	add	%o0, 4, %o0
+2:
+	andcc	%o1, 0xffffff80, %o3	! Now everything is 8 aligned and o1 is len to run
+	be	9f
+	 andcc	%o1, 0x78, %o2
+10:
+	ZERO_BIG_BLOCK(%o0, 0x00, %g2)
+	subcc	%o3, 128, %o3
+	ZERO_BIG_BLOCK(%o0, 0x40, %g2)
+11:
+	EXT(10b, 11b, 20f)
+	bne	10b
+	 add	%o0, 128, %o0
+
+	orcc	%o2, %g0, %g0
+9:
+	be	13f
+	 andcc	%o1, 7, %o1
+
+	srl	%o2, 1, %o3
+	set	13f, %o4
+	sub	%o4, %o3, %o4
+	jmp	%o4
+	 add	%o0, %o2, %o0
+
+12:
+	ZERO_LAST_BLOCKS(%o0, 0x48, %g2)
+	ZERO_LAST_BLOCKS(%o0, 0x08, %g2)
+13:
+	be	8f
+	 andcc	%o1, 4, %g0
+
+	be	1f
+	 andcc	%o1, 2, %g0
+
+	EX(st	%g3, [%o0], and %o1, 7)
+	add	%o0, 4, %o0
+1:
+	be	1f
+	 andcc	%o1, 1, %g0
+
+	EX(sth	%g3, [%o0], and %o1, 3)
+	add	%o0, 2, %o0
+1:
+	bne,a	8f
+	 EX(stb	%g3, [%o0], and %o1, 1)
+8:
+	retl
+	 clr	%o0
+7:
+	be	13b
+	 orcc	%o1, 0, %g0
+
+	be	0f
+8:
+	 add	%o0, 1, %o0
+	subcc	%o1, 1, %o1
+	bne,a	8b
+	 EX(stb	%g3, [%o0 - 1], add %o1, 1)
+0:
+	retl
+	 clr	%o0
+__memset_end:
+
+	.section .fixup,#alloc,#execinstr
+	.align	4
+20:
+	cmp	%g2, 8
+	bleu	1f
+	 and	%o1, 0x7f, %o1
+	sub	%g2, 9, %g2
+	add	%o3, 64, %o3
+1:
+	sll	%g2, 3, %g2
+	add	%o3, %o1, %o0
+	b 30f
+	 sub	%o0, %g2, %o0
+21:
+	mov	8, %o0
+	and	%o1, 7, %o1
+	sub	%o0, %g2, %o0
+	sll	%o0, 3, %o0
+	b 30f
+	 add	%o0, %o1, %o0
+30:
+/* %o4 is faulting address, %o5 is %pc where fault occurred */
+	save	%sp, -104, %sp
+	mov	%i5, %o0
+	mov	%i7, %o1
+	call	lookup_fault
+	 mov	%i4, %o2
+	ret
+	 restore
+
+	.globl __bzero_end
+__bzero_end:
diff --git a/arch/sparc/lib/mul.S b/arch/sparc/lib/mul.S
new file mode 100644
index 0000000..83dffbc
--- /dev/null
+++ b/arch/sparc/lib/mul.S
@@ -0,0 +1,135 @@
+/* $Id: mul.S,v 1.4 1996/09/30 02:22:32 davem Exp $
+ * mul.S:       This routine was taken from glibc-1.09 and is covered
+ *              by the GNU Library General Public License Version 2.
+ */
+
+/*
+ * Signed multiply, from Appendix E of the Sparc Version 8
+ * Architecture Manual.
+ */
+
+/*
+ * Returns %o0 * %o1 in %o1%o0 (i.e., %o1 holds the upper 32 bits of
+ * the 64-bit product).
+ *
+ * This code optimizes short (less than 13-bit) multiplies.
+ */
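+
+/* Calling convention, as a C sketch (illustrative only, not part of
+ * the build): the 64-bit signed product of the two 32-bit arguments
+ * comes back split across two registers,
+ *
+ *	long long product = (long long) a * (long long) b;
+ *	// %o0 = (unsigned int) product;	low word
+ *	// %o1 = (int) (product >> 32);		high word
+ */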
+
+	.globl .mul
+.mul:
+	mov	%o0, %y		! multiplier -> Y
+	andncc	%o0, 0xfff, %g0	! test bits 12..31
+	be	Lmul_shortway	! if zero, can do it the short way
+	 andcc	%g0, %g0, %o4	! zero the partial product and clear N and V
+
+	/*
+	 * Long multiply.  32 steps, followed by a final shift step.
+	 */
+	mulscc	%o4, %o1, %o4	! 1
+	mulscc	%o4, %o1, %o4	! 2
+	mulscc	%o4, %o1, %o4	! 3
+	mulscc	%o4, %o1, %o4	! 4
+	mulscc	%o4, %o1, %o4	! 5
+	mulscc	%o4, %o1, %o4	! 6
+	mulscc	%o4, %o1, %o4	! 7
+	mulscc	%o4, %o1, %o4	! 8
+	mulscc	%o4, %o1, %o4	! 9
+	mulscc	%o4, %o1, %o4	! 10
+	mulscc	%o4, %o1, %o4	! 11
+	mulscc	%o4, %o1, %o4	! 12
+	mulscc	%o4, %o1, %o4	! 13
+	mulscc	%o4, %o1, %o4	! 14
+	mulscc	%o4, %o1, %o4	! 15
+	mulscc	%o4, %o1, %o4	! 16
+	mulscc	%o4, %o1, %o4	! 17
+	mulscc	%o4, %o1, %o4	! 18
+	mulscc	%o4, %o1, %o4	! 19
+	mulscc	%o4, %o1, %o4	! 20
+	mulscc	%o4, %o1, %o4	! 21
+	mulscc	%o4, %o1, %o4	! 22
+	mulscc	%o4, %o1, %o4	! 23
+	mulscc	%o4, %o1, %o4	! 24
+	mulscc	%o4, %o1, %o4	! 25
+	mulscc	%o4, %o1, %o4	! 26
+	mulscc	%o4, %o1, %o4	! 27
+	mulscc	%o4, %o1, %o4	! 28
+	mulscc	%o4, %o1, %o4	! 29
+	mulscc	%o4, %o1, %o4	! 30
+	mulscc	%o4, %o1, %o4	! 31
+	mulscc	%o4, %o1, %o4	! 32
+	mulscc	%o4, %g0, %o4	! final shift
+
+	! If %o0 was negative, the result is
+	!	(%o0 * %o1) + (%o1 << 32))
+	! We fix that here.
+
+#if 0
+	tst	%o0
+	bge	1f
+	 rd	%y, %o0
+
+	! %o0 was indeed negative; fix upper 32 bits of result by subtracting 
+	! %o1 (i.e., return %o4 - %o1 in %o1).
+	retl
+	 sub	%o4, %o1, %o1
+
+1:
+	retl
+	 mov	%o4, %o1
+#else
+	/* Faster code adapted from tege@sics.se's code for umul.S.  */
+	sra	%o0, 31, %o2	! make mask from sign bit
+	and	%o1, %o2, %o2	! %o2 = 0 or %o1, depending on sign of %o0
+	rd	%y, %o0		! get lower half of product
+	retl
+	 sub	%o4, %o2, %o1	! subtract compensation 
+				!  and put upper half in place
+#endif
+
+Lmul_shortway:
+	/*
+	 * Short multiply.  12 steps, followed by a final shift step.
+	 * The resulting bits are off by 12 and (32-12) = 20 bit positions,
+	 * but there is no problem with %o0 being negative (unlike above).
+	 */
+	mulscc	%o4, %o1, %o4	! 1
+	mulscc	%o4, %o1, %o4	! 2
+	mulscc	%o4, %o1, %o4	! 3
+	mulscc	%o4, %o1, %o4	! 4
+	mulscc	%o4, %o1, %o4	! 5
+	mulscc	%o4, %o1, %o4	! 6
+	mulscc	%o4, %o1, %o4	! 7
+	mulscc	%o4, %o1, %o4	! 8
+	mulscc	%o4, %o1, %o4	! 9
+	mulscc	%o4, %o1, %o4	! 10
+	mulscc	%o4, %o1, %o4	! 11
+	mulscc	%o4, %o1, %o4	! 12
+	mulscc	%o4, %g0, %o4	! final shift
+
+	/*
+	 *  %o4 has 20 of the bits that should be in the low part of the
+	 * result; %y has the bottom 12 (as %y's top 12).  That is:
+	 *
+	 *	  %o4		    %y
+	 * +----------------+----------------+
+	 * | -12- |   -20-  | -12- |   -20-  |
+	 * +------(---------+------)---------+
+	 *  --hi-- ----low-part----
+	 *
+	 * The upper 12 bits of %o4 should be sign-extended to form the
+	 * high part of the product (i.e., highpart = %o4 >> 20).
+	 */
+
+	rd	%y, %o5
+	sll	%o4, 12, %o0	! shift middle bits left 12
+	srl	%o5, 20, %o5	! shift low bits right 20, zero fill at left
+	or	%o5, %o0, %o0	! construct low part of result
+	retl
+	 sra	%o4, 20, %o1	! ... and extract high part of result
+
+	.globl	.mul_patch
+.mul_patch:
+	smul	%o0, %o1, %o0
+	retl
+	 rd	%y, %o1
+	nop
diff --git a/arch/sparc/lib/muldi3.S b/arch/sparc/lib/muldi3.S
new file mode 100644
index 0000000..7f17872
--- /dev/null
+++ b/arch/sparc/lib/muldi3.S
@@ -0,0 +1,76 @@
+/* Copyright (C) 1989, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING.  If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA.  */
+
+	.text
+	.align 4
+	.globl __muldi3
+__muldi3:
+	save  %sp, -104, %sp
+	wr  %g0, %i1, %y
+	sra  %i3, 0x1f, %g2
+	and  %i1, %g2, %g2
+	andcc  %g0, 0, %g1
+	mulscc  %g1, %i3, %g1
+	mulscc  %g1, %i3, %g1
+	mulscc  %g1, %i3, %g1
+	mulscc  %g1, %i3, %g1
+	mulscc  %g1, %i3, %g1
+	mulscc  %g1, %i3, %g1
+	mulscc  %g1, %i3, %g1
+	mulscc  %g1, %i3, %g1
+	mulscc  %g1, %i3, %g1
+	mulscc  %g1, %i3, %g1
+	mulscc  %g1, %i3, %g1
+	mulscc  %g1, %i3, %g1
+	mulscc  %g1, %i3, %g1
+	mulscc  %g1, %i3, %g1
+	mulscc  %g1, %i3, %g1
+	mulscc  %g1, %i3, %g1
+	mulscc  %g1, %i3, %g1
+	mulscc  %g1, %i3, %g1
+	mulscc  %g1, %i3, %g1
+	mulscc  %g1, %i3, %g1
+	mulscc  %g1, %i3, %g1
+	mulscc  %g1, %i3, %g1
+	mulscc  %g1, %i3, %g1
+	mulscc  %g1, %i3, %g1
+	mulscc  %g1, %i3, %g1
+	mulscc  %g1, %i3, %g1
+	mulscc  %g1, %i3, %g1
+	mulscc  %g1, %i3, %g1
+	mulscc  %g1, %i3, %g1
+	mulscc  %g1, %i3, %g1
+	mulscc  %g1, %i3, %g1
+	mulscc  %g1, %i3, %g1
+	mulscc  %g1, 0, %g1
+	add  %g1, %g2, %l2
+	rd  %y, %o1
+	mov  %o1, %l3
+	mov  %i1, %o0
+	call  .umul
+	mov  %i2, %o1
+	mov  %o0, %l0
+	mov  %i0, %o0
+	call  .umul
+	mov  %i3, %o1
+	add  %l0, %o0, %l0
+	mov  %l2, %i0
+	add  %l2, %l0, %i0
+	ret
+	restore  %g0, %l3, %o1
diff --git a/arch/sparc/lib/rem.S b/arch/sparc/lib/rem.S
new file mode 100644
index 0000000..4450814
--- /dev/null
+++ b/arch/sparc/lib/rem.S
@@ -0,0 +1,382 @@
+/* $Id: rem.S,v 1.7 1996/09/30 02:22:34 davem Exp $
+ * rem.S:       This routine was taken from glibc-1.09 and is covered
+ *              by the GNU Library General Public License Version 2.
+ */
+
+
+/* This file is generated from divrem.m4; DO NOT EDIT! */
+/*
+ * Division and remainder, from Appendix E of the Sparc Version 8
+ * Architecture Manual, with fixes from Gordon Irlam.
+ */
+
+/*
+ * Input: dividend and divisor in %o0 and %o1 respectively.
+ *
+ * m4 parameters:
+ *  .rem	name of function to generate
+ *  rem		rem=div => %o0 / %o1; rem=rem => %o0 % %o1
+ *  true		true=true => signed; true=false => unsigned
+ *
+ * Algorithm parameters:
+ *  N		how many bits per iteration we try to get (4)
+ *  WORDSIZE	total number of bits (32)
+ *
+ * Derived constants:
+ *  TOPBITS	number of bits in the top decade of a number
+ *
+ * Important variables:
+ *  Q		the partial quotient under development (initially 0)
+ *  R		the remainder so far, initially the dividend
+ *  ITER	number of main division loop iterations required;
+ *		equal to ceil(log2(quotient) / N).  Note that this
+ *		is the log base (2^N) of the quotient.
+ *  V		the current comparand, initially divisor*2^(ITER*N-1)
+ *
+ * Cost:
+ *  Current estimate for non-large dividend is
+ *	ceil(log2(quotient) / N) * (10 + 7N/2) + C
+ *  A large dividend is one greater than 2^(31-TOPBITS) and takes a
+ *  different path, as the upper bits of the quotient must be developed
+ *  one bit at a time.
+ */
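+
+/* The unrolled tree below performs the same computation as the classic
+ * shift-and-subtract loop, only developing four quotient bits per pass
+ * and using a non-restoring fixup at the end.  As a plain C sketch of
+ * the unsigned core (illustrative only; divisor assumed nonzero -- the
+ * real code traps through ST_DIV0 for zero):
+ *
+ *	unsigned int sketch_rem(unsigned int r, unsigned int v)
+ *	{
+ *		unsigned int q = 0;
+ *		int steps = 0;
+ *
+ *		while (v <= r && !(v & 0x80000000)) {
+ *			v <<= 1;		// scale comparand
+ *			steps++;
+ *		}
+ *		for (; steps >= 0; steps--, v >>= 1) {
+ *			q <<= 1;
+ *			if (r >= v) {		// one quotient bit per step
+ *				r -= v;
+ *				q |= 1;
+ *			}
+ *		}
+ *		return r;			// .rem keeps the remainder,
+ *						// .div would return q instead
+ *	}
+ */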
+
+
+	.globl .rem
+.rem:
+	! compute sign of result; if neither is negative, no problem
+	orcc	%o1, %o0, %g0	! either negative?
+	bge	2f			! no, go do the divide
+	 mov	%o0, %g2	! compute sign in any case
+
+	tst	%o1
+	bge	1f
+	 tst	%o0
+	! %o1 is definitely negative; %o0 might also be negative
+	bge	2f			! if %o0 not negative...
+	 sub	%g0, %o1, %o1	! in any case, make %o1 nonneg
+1:	! %o0 is negative, %o1 is nonnegative
+	sub	%g0, %o0, %o0	! make %o0 nonnegative
+2:
+
+	! Ready to divide.  Compute size of quotient; scale comparand.
+	orcc	%o1, %g0, %o5
+	bne	1f
+	 mov	%o0, %o3
+
+		! Divide by zero trap.  If it returns, return 0 (about as
+		! wrong as possible, but that is what SunOS does...).
+		ta	ST_DIV0
+		retl
+		 clr	%o0
+
+1:
+	cmp	%o3, %o5			! if %o1 exceeds %o0, done
+	blu	Lgot_result		! (and algorithm fails otherwise)
+	 clr	%o2
+
+	sethi	%hi(1 << (32 - 4 - 1)), %g1
+
+	cmp	%o3, %g1
+	blu	Lnot_really_big
+	 clr	%o4
+
+	! Here the dividend is >= 2**(31-N) or so.  We must be careful here,
+	! as our usual N-at-a-shot divide step will cause overflow and havoc.
+	! The number of bits in the result here is N*ITER+SC, where SC <= N.
+	! Compute ITER in an unorthodox manner: know we need to shift V into
+	! the top decade: so do not even bother to compare to R.
+	1:
+		cmp	%o5, %g1
+		bgeu	3f
+		 mov	1, %g7
+
+		sll	%o5, 4, %o5
+
+		b	1b
+		 add	%o4, 1, %o4
+
+	! Now compute %g7.
+	2:
+		addcc	%o5, %o5, %o5
+
+		bcc	Lnot_too_big
+		 add	%g7, 1, %g7
+
+		! We get here if the %o1 overflowed while shifting.
+		! This means that %o3 has the high-order bit set.
+		! Restore %o5 and subtract from %o3.
+		sll	%g1, 4, %g1	! high order bit
+		srl	%o5, 1, %o5		! rest of %o5
+		add	%o5, %g1, %o5
+
+		b	Ldo_single_div
+		 sub	%g7, 1, %g7
+
+	Lnot_too_big:
+	3:
+		cmp	%o5, %o3
+		blu	2b
+		 nop
+
+		be	Ldo_single_div
+		 nop
+	/* NB: these are commented out in the V8-Sparc manual as well */
+	/* (I do not understand this) */
+	! %o5 > %o3: went too far: back up 1 step
+	!	srl	%o5, 1, %o5
+	!	dec	%g7
+	! do single-bit divide steps
+	!
+	! We have to be careful here.  We know that %o3 >= %o5, so we can do the
+	! first divide step without thinking.  BUT, the others are conditional,
+	! and are only done if %o3 >= 0.  Because both %o3 and %o5 may have the high-
+	! order bit set in the first step, just falling into the regular
+	! division loop will mess up the first time around.
+	! So we unroll slightly...
+	Ldo_single_div:
+		subcc	%g7, 1, %g7
+		bl	Lend_regular_divide
+		 nop
+
+		sub	%o3, %o5, %o3
+		mov	1, %o2
+
+		b	Lend_single_divloop
+		 nop
+	Lsingle_divloop:
+		sll	%o2, 1, %o2
+
+		bl	1f
+		 srl	%o5, 1, %o5
+		! %o3 >= 0
+		sub	%o3, %o5, %o3
+
+		b	2f
+		 add	%o2, 1, %o2
+	1:	! %o3 < 0
+		add	%o3, %o5, %o3
+		sub	%o2, 1, %o2
+	2:
+	Lend_single_divloop:
+		subcc	%g7, 1, %g7
+		bge	Lsingle_divloop
+		 tst	%o3
+
+		b,a	Lend_regular_divide
+
+Lnot_really_big:
+1:
+	sll	%o5, 4, %o5
+	cmp	%o5, %o3
+	bleu	1b
+	 addcc	%o4, 1, %o4
+	be	Lgot_result
+	 sub	%o4, 1, %o4
+
+	tst	%o3	! set up for initial iteration
+Ldivloop:
+	sll	%o2, 4, %o2
+		! depth 1, accumulated bits 0
+	bl	L.1.16
+	 srl	%o5,1,%o5
+	! remainder is positive
+	subcc	%o3,%o5,%o3
+			! depth 2, accumulated bits 1
+	bl	L.2.17
+	 srl	%o5,1,%o5
+	! remainder is positive
+	subcc	%o3,%o5,%o3
+			! depth 3, accumulated bits 3
+	bl	L.3.19
+	 srl	%o5,1,%o5
+	! remainder is positive
+	subcc	%o3,%o5,%o3
+			! depth 4, accumulated bits 7
+	bl	L.4.23
+	 srl	%o5,1,%o5
+	! remainder is positive
+	subcc	%o3,%o5,%o3
+
+	b	9f
+	 add	%o2, (7*2+1), %o2
+	
+L.4.23:
+	! remainder is negative
+	addcc	%o3,%o5,%o3
+	b	9f
+	 add	%o2, (7*2-1), %o2
+	
+L.3.19:
+	! remainder is negative
+	addcc	%o3,%o5,%o3
+			! depth 4, accumulated bits 5
+	bl	L.4.21
+	 srl	%o5,1,%o5
+	! remainder is positive
+	subcc	%o3,%o5,%o3
+	b	9f
+	 add	%o2, (5*2+1), %o2
+	
+L.4.21:
+	! remainder is negative
+	addcc	%o3,%o5,%o3
+	b	9f
+	 add	%o2, (5*2-1), %o2
+	
+L.2.17:
+	! remainder is negative
+	addcc	%o3,%o5,%o3
+			! depth 3, accumulated bits 1
+	bl	L.3.17
+	 srl	%o5,1,%o5
+	! remainder is positive
+	subcc	%o3,%o5,%o3
+			! depth 4, accumulated bits 3
+	bl	L.4.19
+	 srl	%o5,1,%o5
+	! remainder is positive
+	subcc	%o3,%o5,%o3
+	b	9f
+	 add	%o2, (3*2+1), %o2
+
+L.4.19:
+	! remainder is negative
+	addcc	%o3,%o5,%o3
+	b	9f
+	 add	%o2, (3*2-1), %o2
+
+L.3.17:
+	! remainder is negative
+	addcc	%o3,%o5,%o3
+			! depth 4, accumulated bits 1
+	bl	L.4.17
+	 srl	%o5,1,%o5
+	! remainder is positive
+	subcc	%o3,%o5,%o3
+	b	9f
+	 add	%o2, (1*2+1), %o2
+
+L.4.17:
+	! remainder is negative
+	addcc	%o3,%o5,%o3
+	b	9f
+	 add	%o2, (1*2-1), %o2
+
+L.1.16:
+	! remainder is negative
+	addcc	%o3,%o5,%o3
+			! depth 2, accumulated bits -1
+	bl	L.2.15
+	 srl	%o5,1,%o5
+	! remainder is positive
+	subcc	%o3,%o5,%o3
+			! depth 3, accumulated bits -1
+	bl	L.3.15
+	 srl	%o5,1,%o5
+	! remainder is positive
+	subcc	%o3,%o5,%o3
+			! depth 4, accumulated bits -1
+	bl	L.4.15
+	 srl	%o5,1,%o5
+	! remainder is positive
+	subcc	%o3,%o5,%o3
+	b	9f
+	 add	%o2, (-1*2+1), %o2
+
+L.4.15:
+	! remainder is negative
+	addcc	%o3,%o5,%o3
+	b	9f
+	 add	%o2, (-1*2-1), %o2
+
+L.3.15:
+	! remainder is negative
+	addcc	%o3,%o5,%o3
+			! depth 4, accumulated bits -3
+	bl	L.4.13
+	 srl	%o5,1,%o5
+	! remainder is positive
+	subcc	%o3,%o5,%o3
+	b	9f
+	 add	%o2, (-3*2+1), %o2
+
+L.4.13:
+	! remainder is negative
+	addcc	%o3,%o5,%o3
+	b	9f
+	 add	%o2, (-3*2-1), %o2
+
+L.2.15:
+	! remainder is negative
+	addcc	%o3,%o5,%o3
+			! depth 3, accumulated bits -3
+	bl	L.3.13
+	 srl	%o5,1,%o5
+	! remainder is positive
+	subcc	%o3,%o5,%o3
+			! depth 4, accumulated bits -5
+	bl	L.4.11
+	 srl	%o5,1,%o5
+	! remainder is positive
+	subcc	%o3,%o5,%o3
+	b	9f
+	 add	%o2, (-5*2+1), %o2
+
+L.4.11:
+	! remainder is negative
+	addcc	%o3,%o5,%o3
+	b	9f
+	 add	%o2, (-5*2-1), %o2
+
+
+L.3.13:
+	! remainder is negative
+	addcc	%o3,%o5,%o3
+			! depth 4, accumulated bits -7
+	bl	L.4.9
+	 srl	%o5,1,%o5
+	! remainder is positive
+	subcc	%o3,%o5,%o3
+	b	9f
+	 add	%o2, (-7*2+1), %o2
+
+L.4.9:
+	! remainder is negative
+	addcc	%o3,%o5,%o3
+	b	9f
+	 add	%o2, (-7*2-1), %o2
+
+	9:
+Lend_regular_divide:
+	subcc	%o4, 1, %o4
+	bge	Ldivloop
+	 tst	%o3
+
+	bl,a	Lgot_result
+	! non-restoring fixup here (one instruction only!)
+	add	%o3, %o1, %o3
+
+Lgot_result:
+	! check to see if answer should be < 0
+	tst	%g2
+	bl,a	1f
+	 sub %g0, %o3, %o3
+1:
+	retl
+	 mov %o3, %o0
+
+	.globl	.rem_patch
+.rem_patch:
+	sra	%o0, 0x1f, %o4
+	wr	%o4, 0x0, %y
+	nop
+	nop
+	nop
+	sdivcc	%o0, %o1, %o2
+	bvs,a	1f
+	 xnor	%o2, %g0, %o2
+1:	smul	%o2, %o1, %o2
+	retl
+	 sub	%o0, %o2, %o0
+	nop
diff --git a/arch/sparc/lib/rwsem.S b/arch/sparc/lib/rwsem.S
new file mode 100644
index 0000000..e7578dc
--- /dev/null
+++ b/arch/sparc/lib/rwsem.S
@@ -0,0 +1,205 @@
+/* $Id: rwsem.S,v 1.5 2000/05/09 17:40:13 davem Exp $
+ * Assembly part of rw semaphores.
+ *
+ * Copyright (C) 1999 Jakub Jelinek (jakub@redhat.com)
+ */
+
+#include <linux/config.h>
+#include <asm/ptrace.h>
+#include <asm/psr.h>
+
+	.section .sched.text
+	.align	4
+
+	.globl		___down_read
+___down_read:
+	rd		%psr, %g3
+	nop
+	nop
+	nop
+	or		%g3, PSR_PIL, %g7
+	wr		%g7, 0, %psr
+	nop
+	nop
+	nop
+#ifdef CONFIG_SMP
+1:	ldstub		[%g1 + 4], %g7
+	tst		%g7
+	bne		1b
+	 ld		[%g1], %g7
+	sub		%g7, 1, %g7
+	st		%g7, [%g1]
+	stb		%g0, [%g1 + 4]
+#else
+	ld		[%g1], %g7
+	sub		%g7, 1, %g7
+	st		%g7, [%g1]
+#endif
+	wr		%g3, 0, %psr
+	add		%g7, 1, %g7
+	nop
+	nop
+	subcc		%g7, 1, %g7
+	bneg		3f
+	 nop
+2:	jmpl		%o7, %g0
+	 mov		%g4, %o7
+3:	save		%sp, -64, %sp
+	mov		%g1, %l1
+	mov		%g4, %l4
+	bcs		4f
+	 mov		%g5, %l5
+	call		down_read_failed
+	 mov		%l1, %o0
+	mov		%l1, %g1
+	mov		%l4, %g4
+	ba		___down_read
+	 restore	%l5, %g0, %g5
+4:	call		down_read_failed_biased
+	 mov		%l1, %o0
+	mov		%l1, %g1
+	mov		%l4, %g4
+	ba		2b
+	 restore	%l5, %g0, %g5
+
+	.globl		___down_write
+___down_write:
+	rd		%psr, %g3
+	nop
+	nop
+	nop
+	or		%g3, PSR_PIL, %g7
+	wr		%g7, 0, %psr
+	sethi		%hi(0x01000000), %g2
+	nop
+	nop
+#ifdef CONFIG_SMP
+1:	ldstub		[%g1 + 4], %g7
+	tst		%g7
+	bne		1b
+	 ld		[%g1], %g7
+	sub		%g7, %g2, %g7
+	st		%g7, [%g1]
+	stb		%g0, [%g1 + 4]
+#else
+	ld		[%g1], %g7
+	sub		%g7, %g2, %g7
+	st		%g7, [%g1]
+#endif
+	wr		%g3, 0, %psr
+	add		%g7, %g2, %g7
+	nop
+	nop
+	subcc		%g7, %g2, %g7
+	bne		3f
+	 nop
+2:	jmpl		%o7, %g0
+	 mov		%g4, %o7
+3:	save		%sp, -64, %sp
+	mov		%g1, %l1
+	mov		%g4, %l4
+	bcs		4f
+	 mov		%g5, %l5
+	call		down_write_failed
+	 mov		%l1, %o0
+	mov		%l1, %g1
+	mov		%l4, %g4
+	ba		___down_write
+	 restore	%l5, %g0, %g5
+4:	call		down_write_failed_biased
+	 mov		%l1, %o0
+	mov		%l1, %g1
+	mov		%l4, %g4
+	ba		2b
+	 restore	%l5, %g0, %g5
+
+	.text
+	.globl		___up_read
+___up_read:
+	rd		%psr, %g3
+	nop
+	nop
+	nop
+	or		%g3, PSR_PIL, %g7
+	wr		%g7, 0, %psr
+	nop
+	nop
+	nop
+#ifdef CONFIG_SMP
+1:	ldstub		[%g1 + 4], %g7
+	tst		%g7
+	bne		1b
+	 ld		[%g1], %g7
+	add		%g7, 1, %g7
+	st		%g7, [%g1]
+	stb		%g0, [%g1 + 4]
+#else
+	ld		[%g1], %g7
+	add		%g7, 1, %g7
+	st		%g7, [%g1]
+#endif
+	wr		%g3, 0, %psr
+	nop
+	nop
+	nop
+	cmp		%g7, 0
+	be		3f
+	 nop
+2:	jmpl		%o7, %g0
+	 mov		%g4, %o7
+3:	save		%sp, -64, %sp
+	mov		%g1, %l1
+	mov		%g4, %l4
+	mov		%g5, %l5
+	clr		%o1
+	call		__rwsem_wake
+	 mov		%l1, %o0
+	mov		%l1, %g1
+	mov		%l4, %g4
+	ba		2b
+	 restore	%l5, %g0, %g5
+
+	.globl		___up_write
+___up_write:
+	rd		%psr, %g3
+	nop
+	nop
+	nop
+	or		%g3, PSR_PIL, %g7
+	wr		%g7, 0, %psr
+	sethi		%hi(0x01000000), %g2
+	nop
+	nop
+#ifdef CONFIG_SMP
+1:	ldstub		[%g1 + 4], %g7
+	tst		%g7
+	bne		1b
+	 ld		[%g1], %g7
+	add		%g7, %g2, %g7
+	st		%g7, [%g1]
+	stb		%g0, [%g1 + 4]
+#else
+	ld		[%g1], %g7
+	add		%g7, %g2, %g7
+	st		%g7, [%g1]
+#endif
+	wr		%g3, 0, %psr
+	sub		%g7, %g2, %g7
+	nop
+	nop
+	addcc		%g7, %g2, %g7
+	bcs		3f
+	 nop
+2:	jmpl		%o7, %g0
+	 mov		%g4, %o7
+3:	save		%sp, -64, %sp
+	mov		%g1, %l1
+	mov		%g4, %l4
+	mov		%g5, %l5
+	mov		%g7, %o1
+	call		__rwsem_wake
+	 mov		%l1, %o0
+	mov		%l1, %g1
+	mov		%l4, %g4
+	ba		2b
+	 restore	%l5, %g0, %g5
diff --git a/arch/sparc/lib/sdiv.S b/arch/sparc/lib/sdiv.S
new file mode 100644
index 0000000..e0ad80b
--- /dev/null
+++ b/arch/sparc/lib/sdiv.S
@@ -0,0 +1,379 @@
+/* $Id: sdiv.S,v 1.6 1996/10/02 17:37:00 davem Exp $
+ * sdiv.S:      This routine was taken from glibc-1.09 and is covered
+ *              by the GNU Library General Public License Version 2.
+ */
+
+
+/* This file is generated from divrem.m4; DO NOT EDIT! */
+/*
+ * Division and remainder, from Appendix E of the Sparc Version 8
+ * Architecture Manual, with fixes from Gordon Irlam.
+ */
+
+/*
+ * Input: dividend and divisor in %o0 and %o1 respectively.
+ *
+ * m4 parameters:
+ *  .div	name of function to generate
+ *  div		div=div => %o0 / %o1; div=rem => %o0 % %o1
+ *  true		true=true => signed; true=false => unsigned
+ *
+ * Algorithm parameters:
+ *  N		how many bits per iteration we try to get (4)
+ *  WORDSIZE	total number of bits (32)
+ *
+ * Derived constants:
+ *  TOPBITS	number of bits in the top decade of a number
+ *
+ * Important variables:
+ *  Q		the partial quotient under development (initially 0)
+ *  R		the remainder so far, initially the dividend
+ *  ITER	number of main division loop iterations required;
+ *		equal to ceil(log2(quotient) / N).  Note that this
+ *		is the log base (2^N) of the quotient.
+ *  V		the current comparand, initially divisor*2^(ITER*N-1)
+ *
+ * Cost:
+ *  Current estimate for non-large dividend is
+ *	ceil(log2(quotient) / N) * (10 + 7N/2) + C
+ *  A large dividend is one greater than 2^(31-TOPBITS) and takes a
+ *  different path, as the upper bits of the quotient must be developed
+ *  one bit at a time.
+ */
+
+
+	.globl .div
+.div:
+	! compute sign of result; if neither is negative, no problem
+	orcc	%o1, %o0, %g0	! either negative?
+	bge	2f			! no, go do the divide
+	 xor	%o1, %o0, %g2	! compute sign in any case
+
+	tst	%o1
+	bge	1f
+	 tst	%o0
+	! %o1 is definitely negative; %o0 might also be negative
+	bge	2f			! if %o0 not negative...
+	 sub	%g0, %o1, %o1	! in any case, make %o1 nonneg
+1:	! %o0 is negative, %o1 is nonnegative
+	sub	%g0, %o0, %o0	! make %o0 nonnegative
+2:
+
+	! Ready to divide.  Compute size of quotient; scale comparand.
+	orcc	%o1, %g0, %o5
+	bne	1f
+	 mov	%o0, %o3
+
+		! Divide by zero trap.  If it returns, return 0 (about as
+		! wrong as possible, but that is what SunOS does...).
+		ta	ST_DIV0
+		retl
+		 clr	%o0
+
+1:
+	cmp	%o3, %o5			! if %o1 exceeds %o0, done
+	blu	Lgot_result		! (and algorithm fails otherwise)
+	 clr	%o2
+
+	sethi	%hi(1 << (32 - 4 - 1)), %g1
+
+	cmp	%o3, %g1
+	blu	Lnot_really_big
+	 clr	%o4
+
+	! Here the dividend is >= 2**(31-N) or so.  We must be careful here,
+	! as our usual N-at-a-shot divide step will cause overflow and havoc.
+	! The number of bits in the result here is N*ITER+SC, where SC <= N.
+	! Compute ITER in an unorthodox manner: know we need to shift V into
+	! the top decade: so do not even bother to compare to R.
+	1:
+		cmp	%o5, %g1
+		bgeu	3f
+		 mov	1, %g7
+
+		sll	%o5, 4, %o5
+
+		b	1b
+		 add	%o4, 1, %o4
+
+	! Now compute %g7.
+	2:
+		addcc	%o5, %o5, %o5
+		bcc	Lnot_too_big
+		 add	%g7, 1, %g7
+
+		! We get here if the %o1 overflowed while shifting.
+		! This means that %o3 has the high-order bit set.
+		! Restore %o5 and subtract from %o3.
+		sll	%g1, 4, %g1	! high order bit
+		srl	%o5, 1, %o5		! rest of %o5
+		add	%o5, %g1, %o5
+
+		b	Ldo_single_div
+		 sub	%g7, 1, %g7
+
+	Lnot_too_big:
+	3:
+		cmp	%o5, %o3
+		blu	2b
+		 nop
+
+		be	Ldo_single_div
+		 nop
+	/* NB: these are commented out in the V8-Sparc manual as well */
+	/* (I do not understand this) */
+	! %o5 > %o3: went too far: back up 1 step
+	!	srl	%o5, 1, %o5
+	!	dec	%g7
+	! do single-bit divide steps
+	!
+	! We have to be careful here.  We know that %o3 >= %o5, so we can do the
+	! first divide step without thinking.  BUT, the others are conditional,
+	! and are only done if %o3 >= 0.  Because both %o3 and %o5 may have the high-
+	! order bit set in the first step, just falling into the regular
+	! division loop will mess up the first time around.
+	! So we unroll slightly...
+	Ldo_single_div:
+		subcc	%g7, 1, %g7
+		bl	Lend_regular_divide
+		 nop
+
+		sub	%o3, %o5, %o3
+		mov	1, %o2
+
+		b	Lend_single_divloop
+		 nop
+	Lsingle_divloop:
+		sll	%o2, 1, %o2
+
+		bl	1f
+		 srl	%o5, 1, %o5
+		! %o3 >= 0
+		sub	%o3, %o5, %o3
+
+		b	2f
+		 add	%o2, 1, %o2
+	1:	! %o3 < 0
+		add	%o3, %o5, %o3
+		sub	%o2, 1, %o2
+	2:
+	Lend_single_divloop:
+		subcc	%g7, 1, %g7
+		bge	Lsingle_divloop
+		 tst	%o3
+
+		b,a	Lend_regular_divide
+
+Lnot_really_big:
+1:
+	sll	%o5, 4, %o5
+	cmp	%o5, %o3
+	bleu	1b
+	 addcc	%o4, 1, %o4
+
+	be	Lgot_result
+	 sub	%o4, 1, %o4
+
+	tst	%o3	! set up for initial iteration
+Ldivloop:
+	sll	%o2, 4, %o2
+		! depth 1, accumulated bits 0
+	bl	L.1.16
+	 srl	%o5,1,%o5
+	! remainder is positive
+	subcc	%o3,%o5,%o3
+			! depth 2, accumulated bits 1
+	bl	L.2.17
+	 srl	%o5,1,%o5
+	! remainder is positive
+	subcc	%o3,%o5,%o3
+			! depth 3, accumulated bits 3
+	bl	L.3.19
+	 srl	%o5,1,%o5
+	! remainder is positive
+	subcc	%o3,%o5,%o3
+			! depth 4, accumulated bits 7
+	bl	L.4.23
+	 srl	%o5,1,%o5
+	! remainder is positive
+	subcc	%o3,%o5,%o3
+	b	9f
+	 add	%o2, (7*2+1), %o2
+
+L.4.23:
+	! remainder is negative
+	addcc	%o3,%o5,%o3
+	b	9f
+	 add	%o2, (7*2-1), %o2
+
+L.3.19:
+	! remainder is negative
+	addcc	%o3,%o5,%o3
+			! depth 4, accumulated bits 5
+	bl	L.4.21
+	 srl	%o5,1,%o5
+	! remainder is positive
+	subcc	%o3,%o5,%o3
+	b	9f
+	 add	%o2, (5*2+1), %o2
+
+L.4.21:
+	! remainder is negative
+	addcc	%o3,%o5,%o3
+	b	9f
+	 add	%o2, (5*2-1), %o2
+
+L.2.17:
+	! remainder is negative
+	addcc	%o3,%o5,%o3
+			! depth 3, accumulated bits 1
+	bl	L.3.17
+	 srl	%o5,1,%o5
+	! remainder is positive
+	subcc	%o3,%o5,%o3
+			! depth 4, accumulated bits 3
+	bl	L.4.19
+	 srl	%o5,1,%o5
+	! remainder is positive
+	subcc	%o3,%o5,%o3
+	b	9f
+	 add	%o2, (3*2+1), %o2
+
+L.4.19:
+	! remainder is negative
+	addcc	%o3,%o5,%o3
+	b	9f
+	 add	%o2, (3*2-1), %o2
+
+L.3.17:
+	! remainder is negative
+	addcc	%o3,%o5,%o3
+			! depth 4, accumulated bits 1
+	bl	L.4.17
+	 srl	%o5,1,%o5
+	! remainder is positive
+	subcc	%o3,%o5,%o3
+	b	9f
+	 add	%o2, (1*2+1), %o2
+
+L.4.17:
+	! remainder is negative
+	addcc	%o3,%o5,%o3
+	b	9f
+	 add	%o2, (1*2-1), %o2
+
+L.1.16:
+	! remainder is negative
+	addcc	%o3,%o5,%o3
+			! depth 2, accumulated bits -1
+	bl	L.2.15
+	 srl	%o5,1,%o5
+	! remainder is positive
+	subcc	%o3,%o5,%o3
+			! depth 3, accumulated bits -1
+	bl	L.3.15
+	 srl	%o5,1,%o5
+	! remainder is positive
+	subcc	%o3,%o5,%o3
+			! depth 4, accumulated bits -1
+	bl	L.4.15
+	 srl	%o5,1,%o5
+	! remainder is positive
+	subcc	%o3,%o5,%o3
+	b	9f
+	 add	%o2, (-1*2+1), %o2
+
+L.4.15:
+	! remainder is negative
+	addcc	%o3,%o5,%o3
+	b	9f
+	 add	%o2, (-1*2-1), %o2
+
+L.3.15:
+	! remainder is negative
+	addcc	%o3,%o5,%o3
+			! depth 4, accumulated bits -3
+	bl	L.4.13
+	 srl	%o5,1,%o5
+	! remainder is positive
+	subcc	%o3,%o5,%o3
+	b	9f
+	 add	%o2, (-3*2+1), %o2
+
+L.4.13:
+	! remainder is negative
+	addcc	%o3,%o5,%o3
+	b	9f
+	 add	%o2, (-3*2-1), %o2
+
+L.2.15:
+	! remainder is negative
+	addcc	%o3,%o5,%o3
+			! depth 3, accumulated bits -3
+	bl	L.3.13
+	 srl	%o5,1,%o5
+	! remainder is positive
+	subcc	%o3,%o5,%o3
+			! depth 4, accumulated bits -5
+	bl	L.4.11
+	 srl	%o5,1,%o5
+	! remainder is positive
+	subcc	%o3,%o5,%o3
+	b	9f
+	 add	%o2, (-5*2+1), %o2
+
+L.4.11:
+	! remainder is negative
+	addcc	%o3,%o5,%o3
+	b	9f
+	 add	%o2, (-5*2-1), %o2
+
+L.3.13:
+	! remainder is negative
+	addcc	%o3,%o5,%o3
+			! depth 4, accumulated bits -7
+	bl	L.4.9
+	 srl	%o5,1,%o5
+	! remainder is positive
+	subcc	%o3,%o5,%o3
+	b	9f
+	 add	%o2, (-7*2+1), %o2
+
+L.4.9:
+	! remainder is negative
+	addcc	%o3,%o5,%o3
+	b	9f
+	 add	%o2, (-7*2-1), %o2
+
+	9:
+Lend_regular_divide:
+	subcc	%o4, 1, %o4
+	bge	Ldivloop
+	 tst	%o3
+
+	bl,a	Lgot_result
+	! non-restoring fixup here (one instruction only!)
+	sub	%o2, 1, %o2
+
+Lgot_result:
+	! check to see if answer should be < 0
+	tst	%g2
+	bl,a	1f
+	 sub %g0, %o2, %o2
+1:
+	retl
+	 mov %o2, %o0
+
+	.globl	.div_patch
+.div_patch:
+	sra	%o0, 0x1f, %o2
+	wr	%o2, 0x0, %y
+	nop
+	nop
+	nop
+	sdivcc	%o0, %o1, %o0
+	bvs,a	1f
+	 xnor	%o0, %g0, %o0
+1:	retl
+	 nop
diff --git a/arch/sparc/lib/strlen.S b/arch/sparc/lib/strlen.S
new file mode 100644
index 0000000..ed9a763
--- /dev/null
+++ b/arch/sparc/lib/strlen.S
@@ -0,0 +1,81 @@
+/* strlen.S: Sparc optimized strlen code
+ * Hand optimized from GNU libc's strlen
+ * Copyright (C) 1991,1996 Free Software Foundation
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#define LO_MAGIC 0x01010101
+#define HI_MAGIC 0x80808080
+
+0:
+	ldub	[%o0], %o5
+	cmp	%o5, 0
+	be	1f
+	 add	%o0, 1, %o0
+	andcc	%o0, 3, %g0
+	be	4f
+	 or	%o4, %lo(HI_MAGIC), %o3
+	ldub	[%o0], %o5
+	cmp	%o5, 0
+	be	2f
+	 add	%o0, 1, %o0
+	andcc	%o0, 3, %g0
+	be	5f
+	 sethi	%hi(LO_MAGIC), %o4
+	ldub	[%o0], %o5
+	cmp	%o5, 0
+	be	3f
+	 add	%o0, 1, %o0
+	b	8f
+	 or	%o4, %lo(LO_MAGIC), %o2
+1:
+	retl
+	 mov	0, %o0
+2:
+	retl
+	 mov	1, %o0
+3:
+	retl
+	 mov	2, %o0
+
+	.align 4
+	.global strlen
+strlen:
+	mov	%o0, %o1
+	andcc	%o0, 3, %g0
+	bne	0b
+	 sethi	%hi(HI_MAGIC), %o4
+	or	%o4, %lo(HI_MAGIC), %o3
+4:
+	sethi	%hi(LO_MAGIC), %o4
+5:
+	or	%o4, %lo(LO_MAGIC), %o2
+8:
+	ld	[%o0], %o5
+2:
+	sub	%o5, %o2, %o4
+	andcc	%o4, %o3, %g0
+	be	8b
+	 add	%o0, 4, %o0
+
+	/* Check every byte. */
+	srl	%o5, 24, %g5
+	andcc	%g5, 0xff, %g0
+	be	1f
+	 add	%o0, -4, %o4
+	srl	%o5, 16, %g5
+	andcc	%g5, 0xff, %g0
+	be	1f
+	 add	%o4, 1, %o4
+	srl	%o5, 8, %g5
+	andcc	%g5, 0xff, %g0
+	be	1f
+	 add	%o4, 1, %o4
+	andcc	%o5, 0xff, %g0
+	bne,a	2b
+	 ld	[%o0], %o5
+	add	%o4, 1, %o4
+1:
+	retl
+	 sub	%o4, %o1, %o0
diff --git a/arch/sparc/lib/strlen_user.S b/arch/sparc/lib/strlen_user.S
new file mode 100644
index 0000000..8c8a371
--- /dev/null
+++ b/arch/sparc/lib/strlen_user.S
@@ -0,0 +1,109 @@
+/* strlen_user.S: Sparc optimized strlen_user code
+ *
+ * Return length of string in userspace including terminating 0
+ * or 0 for error
+ *
+ * Copyright (C) 1991,1996 Free Software Foundation
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
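+
+/* Contract sketch (illustrative only): on success the return value is
+ * strlen(str) + 1, i.e. the terminating NUL is counted; a faulting
+ * access returns 0.  __strnlen_user additionally takes a byte limit in
+ * %o1 and gives up once roughly that many bytes have been scanned.
+ */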
+
+#define LO_MAGIC 0x01010101
+#define HI_MAGIC 0x80808080
+
+10:
+	ldub	[%o0], %o5
+	cmp	%o5, 0
+	be	1f
+	 add	%o0, 1, %o0
+	andcc	%o0, 3, %g0
+	be	4f
+	 or	%o4, %lo(HI_MAGIC), %o3
+11:
+	ldub	[%o0], %o5
+	cmp	%o5, 0
+	be	2f
+	 add	%o0, 1, %o0
+	andcc	%o0, 3, %g0
+	be	5f
+	 sethi	%hi(LO_MAGIC), %o4
+12:
+	ldub	[%o0], %o5
+	cmp	%o5, 0
+	be	3f
+	 add	%o0, 1, %o0
+	b	13f
+	 or	%o4, %lo(LO_MAGIC), %o2
+1:
+	retl
+	 mov	1, %o0
+2:
+	retl
+	 mov	2, %o0
+3:
+	retl
+	 mov	3, %o0
+
+	.align 4
+	.global __strlen_user, __strnlen_user
+__strlen_user:
+	sethi	%hi(32768), %o1
+__strnlen_user:
+	mov	%o1, %g1
+	mov	%o0, %o1
+	andcc	%o0, 3, %g0
+	bne	10b
+	 sethi	%hi(HI_MAGIC), %o4
+	or	%o4, %lo(HI_MAGIC), %o3
+4:
+	sethi	%hi(LO_MAGIC), %o4
+5:
+	or	%o4, %lo(LO_MAGIC), %o2
+13:
+	ld	[%o0], %o5
+2:
+	sub	%o5, %o2, %o4
+	andcc	%o4, %o3, %g0
+	bne	82f
+	 add	%o0, 4, %o0
+	sub	%o0, %o1, %g2
+81:	cmp	%g2, %g1
+	blu	13b
+	 mov	%o0, %o4
+	ba,a	1f
+
+	/* Check every byte. */
+82:	srl	%o5, 24, %g5
+	andcc	%g5, 0xff, %g0
+	be	1f
+	 add	%o0, -3, %o4
+	srl	%o5, 16, %g5
+	andcc	%g5, 0xff, %g0
+	be	1f
+	 add	%o4, 1, %o4
+	srl	%o5, 8, %g5
+	andcc	%g5, 0xff, %g0
+	be	1f
+	 add	%o4, 1, %o4
+	andcc	%o5, 0xff, %g0
+	bne	81b
+	 sub	%o0, %o1, %g2
+
+	add	%o4, 1, %o4
+1:
+	retl
+	 sub	%o4, %o1, %o0
+
+	.section .fixup,#alloc,#execinstr
+	.align	4
+9:
+	retl
+	 clr	%o0
+
+	.section __ex_table,#alloc
+	.align	4
+
+	.word	10b, 9b
+	.word	11b, 9b
+	.word	12b, 9b
+	.word	13b, 9b
diff --git a/arch/sparc/lib/strncmp.S b/arch/sparc/lib/strncmp.S
new file mode 100644
index 0000000..6156268
--- /dev/null
+++ b/arch/sparc/lib/strncmp.S
@@ -0,0 +1,118 @@
+/* $Id: strncmp.S,v 1.2 1996/09/09 02:47:20 davem Exp $
+ * strncmp.S: Hand optimized Sparc assembly of GCC output from GNU libc
+ *            generic strncmp routine.
+ */
+
+	.text
+	.align 4
+	.global __strncmp, strncmp
+__strncmp:
+strncmp:
+	mov	%o0, %g3
+	mov	0, %o3
+
+	cmp	%o2, 3
+	ble	7f
+	 mov	0, %g2
+
+	sra	%o2, 2, %o4
+	ldub	[%g3], %o3
+
+0:
+	ldub	[%o1], %g2
+	add	%g3, 1, %g3
+	and	%o3, 0xff, %o0
+
+	cmp	%o0, 0
+	be	8f
+	 add	%o1, 1, %o1
+
+	cmp	%o0, %g2
+	be,a	1f
+	 ldub	[%g3], %o3
+
+	retl
+	 sub	%o0, %g2, %o0
+
+1:
+	ldub	[%o1], %g2
+	add	%g3,1, %g3
+	and	%o3, 0xff, %o0
+
+	cmp	%o0, 0
+	be	8f
+	 add	%o1, 1, %o1
+
+	cmp	%o0, %g2
+	be,a	1f
+	 ldub	[%g3], %o3
+
+	retl
+	 sub	%o0, %g2, %o0
+
+1:
+	ldub	[%o1], %g2
+	add	%g3, 1, %g3
+	and	%o3, 0xff, %o0
+
+	cmp	%o0, 0
+	be	8f
+	 add	%o1, 1, %o1
+
+	cmp	%o0, %g2
+	be,a	1f
+	 ldub	[%g3], %o3
+
+	retl
+	 sub	%o0, %g2, %o0
+
+1:
+	ldub	[%o1], %g2
+	add	%g3, 1, %g3
+	and	%o3, 0xff, %o0
+
+	cmp	%o0, 0
+	be	8f
+	 add	%o1, 1, %o1
+
+	cmp	%o0, %g2
+	be	1f
+	 add	%o4, -1, %o4
+
+	retl
+	 sub	%o0, %g2, %o0
+
+1:
+
+	cmp	%o4, 0
+	bg,a	0b
+	 ldub	[%g3], %o3
+
+	b	7f
+	 and	%o2, 3, %o2
+
+9:
+	ldub	[%o1], %g2
+	add	%g3, 1, %g3
+	and	%o3, 0xff, %o0
+
+	cmp	%o0, 0
+	be	8f
+	 add	%o1, 1, %o1
+
+	cmp	%o0, %g2
+	be	7f
+	 add	%o2, -1, %o2
+
+8:
+	retl
+	 sub	%o0, %g2, %o0
+
+7:
+	cmp	%o2, 0
+	bg,a	9b
+	 ldub	[%g3], %o3
+
+	and	%g2, 0xff, %o0
+	retl
+	 sub	%o3, %o0, %o0
diff --git a/arch/sparc/lib/strncpy_from_user.S b/arch/sparc/lib/strncpy_from_user.S
new file mode 100644
index 0000000..d771989
--- /dev/null
+++ b/arch/sparc/lib/strncpy_from_user.S
@@ -0,0 +1,47 @@
+/* strncpy_from_user.S: Sparc strncpy from userspace.
+ *
+ *  Copyright(C) 1996 David S. Miller
+ */
+
+#include <asm/ptrace.h>
+#include <asm/errno.h>
+
+	.text
+	.align	4
+
+	/* Must return:
+	 *
+	 * -EFAULT		for an exception
+	 * count		if we hit the buffer limit
+	 * bytes copied		if we hit a null byte
+	 */
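+
+	/* Ignoring faults, the loop below behaves like this C sketch
+	 * (illustrative only, not part of the build):
+	 *
+	 *	long sketch(char *dst, const char *src, long count)
+	 *	{
+	 *		long i;
+	 *
+	 *		for (i = 0; i < count; i++) {
+	 *			char c = src[i];	// user load; a fault here => -EFAULT
+	 *			dst[i] = c;
+	 *			if (!c)
+	 *				return i;	// NUL copied, i bytes precede it
+	 *		}
+	 *		return count;			// ran into the buffer limit
+	 *	}
+	 */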
+
+	.globl	__strncpy_from_user
+__strncpy_from_user:
+	/* %o0=dest, %o1=src, %o2=count */
+	mov	%o2, %o3
+1:
+	subcc	%o2, 1, %o2
+	bneg	2f
+	 nop
+10:
+	ldub	[%o1], %o4
+	add	%o0, 1, %o0
+	cmp	%o4, 0
+	add	%o1, 1, %o1
+	bne	1b
+	 stb	%o4, [%o0 - 1]
+2:
+	add	%o2, 1, %o0
+	retl
+	 sub	%o3, %o0, %o0
+
+	.section .fixup,#alloc,#execinstr
+	.align	4
+4:
+	retl
+	 mov	-EFAULT, %o0
+
+	.section __ex_table,#alloc
+	.align	4
+	.word	10b, 4b
diff --git a/arch/sparc/lib/udiv.S b/arch/sparc/lib/udiv.S
new file mode 100644
index 0000000..2abfc6b
--- /dev/null
+++ b/arch/sparc/lib/udiv.S
@@ -0,0 +1,355 @@
+/* $Id: udiv.S,v 1.4 1996/09/30 02:22:38 davem Exp $
+ * udiv.S:      This routine was taken from glibc-1.09 and is covered
+ *              by the GNU Library General Public License Version 2.
+ */
+
+
+/* This file is generated from divrem.m4; DO NOT EDIT! */
+/*
+ * Division and remainder, from Appendix E of the Sparc Version 8
+ * Architecture Manual, with fixes from Gordon Irlam.
+ */
+
+/*
+ * Input: dividend and divisor in %o0 and %o1 respectively.
+ *
+ * m4 parameters:
+ *  .udiv	name of function to generate
+ *  div		div=div => %o0 / %o1; div=rem => %o0 % %o1
+ *  false		false=true => signed; false=false => unsigned
+ *
+ * Algorithm parameters:
+ *  N		how many bits per iteration we try to get (4)
+ *  WORDSIZE	total number of bits (32)
+ *
+ * Derived constants:
+ *  TOPBITS	number of bits in the top decade of a number
+ *
+ * Important variables:
+ *  Q		the partial quotient under development (initially 0)
+ *  R		the remainder so far, initially the dividend
+ *  ITER	number of main division loop iterations required;
+ *		equal to ceil(log2(quotient) / N).  Note that this
+ *		is the log base (2^N) of the quotient.
+ *  V		the current comparand, initially divisor*2^(ITER*N-1)
+ *
+ * Cost:
+ *  Current estimate for non-large dividend is
+ *	ceil(log2(quotient) / N) * (10 + 7N/2) + C
+ *  A large dividend is one greater than 2^(31-TOPBITS) and takes a
+ *  different path, as the upper bits of the quotient must be developed
+ *  one bit at a time.
+ */
+
+
+	.globl .udiv
+.udiv:
+
+	! Ready to divide.  Compute size of quotient; scale comparand.
+	orcc	%o1, %g0, %o5
+	bne	1f
+	 mov	%o0, %o3
+
+		! Divide by zero trap.  If it returns, return 0 (about as
+		! wrong as possible, but that is what SunOS does...).
+		ta	ST_DIV0
+		retl
+		 clr	%o0
+
+1:
+	cmp	%o3, %o5			! if %o1 exceeds %o0, done
+	blu	Lgot_result		! (and algorithm fails otherwise)
+	 clr	%o2
+
+	sethi	%hi(1 << (32 - 4 - 1)), %g1
+
+	cmp	%o3, %g1
+	blu	Lnot_really_big
+	 clr	%o4
+
+	! Here the dividend is >= 2**(31-N) or so.  We must be careful here,
+	! as our usual N-at-a-shot divide step will cause overflow and havoc.
+	! The number of bits in the result here is N*ITER+SC, where SC <= N.
+	! Compute ITER in an unorthodox manner: know we need to shift V into
+	! the top decade: so do not even bother to compare to R.
+	1:
+		cmp	%o5, %g1
+		bgeu	3f
+		 mov	1, %g7
+
+		sll	%o5, 4, %o5
+
+		b	1b
+		 add	%o4, 1, %o4
+
+	! Now compute %g7.
+	2:
+		addcc	%o5, %o5, %o5
+		bcc	Lnot_too_big
+		 add	%g7, 1, %g7
+
+		! We get here if the %o1 overflowed while shifting.
+		! This means that %o3 has the high-order bit set.
+		! Restore %o5 and subtract from %o3.
+		sll	%g1, 4, %g1	! high order bit
+		srl	%o5, 1, %o5		! rest of %o5
+		add	%o5, %g1, %o5
+
+		b	Ldo_single_div
+		 sub	%g7, 1, %g7
+
+	Lnot_too_big:
+	3:
+		cmp	%o5, %o3
+		blu	2b
+		 nop
+
+		be	Ldo_single_div
+		 nop
+	/* NB: these are commented out in the V8-Sparc manual as well */
+	/* (I do not understand this) */
+	! %o5 > %o3: went too far: back up 1 step
+	!	srl	%o5, 1, %o5
+	!	dec	%g7
+	! do single-bit divide steps
+	!
+	! We have to be careful here.  We know that %o3 >= %o5, so we can do the
+	! first divide step without thinking.  BUT, the others are conditional,
+	! and are only done if %o3 >= 0.  Because both %o3 and %o5 may have the high-
+	! order bit set in the first step, just falling into the regular
+	! division loop will mess up the first time around.
+	! So we unroll slightly...
+	Ldo_single_div:
+		subcc	%g7, 1, %g7
+		bl	Lend_regular_divide
+		 nop
+
+		sub	%o3, %o5, %o3
+		mov	1, %o2
+
+		b	Lend_single_divloop
+		 nop
+	Lsingle_divloop:
+		sll	%o2, 1, %o2
+		bl	1f
+		 srl	%o5, 1, %o5
+		! %o3 >= 0
+		sub	%o3, %o5, %o3
+		b	2f
+		 add	%o2, 1, %o2
+	1:	! %o3 < 0
+		add	%o3, %o5, %o3
+		sub	%o2, 1, %o2
+	2:
+	Lend_single_divloop:
+		subcc	%g7, 1, %g7
+		bge	Lsingle_divloop
+		 tst	%o3
+
+		b,a	Lend_regular_divide
+
+Lnot_really_big:
+1:
+	sll	%o5, 4, %o5
+
+	cmp	%o5, %o3
+	bleu	1b
+	 addcc	%o4, 1, %o4
+
+	be	Lgot_result
+	 sub	%o4, 1, %o4
+
+	tst	%o3	! set up for initial iteration
+Ldivloop:
+	sll	%o2, 4, %o2
+		! depth 1, accumulated bits 0
+	bl	L.1.16
+	 srl	%o5,1,%o5
+	! remainder is positive
+	subcc	%o3,%o5,%o3
+			! depth 2, accumulated bits 1
+	bl	L.2.17
+	 srl	%o5,1,%o5
+	! remainder is positive
+	subcc	%o3,%o5,%o3
+			! depth 3, accumulated bits 3
+	bl	L.3.19
+	 srl	%o5,1,%o5
+	! remainder is positive
+	subcc	%o3,%o5,%o3
+			! depth 4, accumulated bits 7
+	bl	L.4.23
+	 srl	%o5,1,%o5
+	! remainder is positive
+	subcc	%o3,%o5,%o3
+	b	9f
+	 add	%o2, (7*2+1), %o2
+
+L.4.23:
+	! remainder is negative
+	addcc	%o3,%o5,%o3
+	b	9f
+	 add	%o2, (7*2-1), %o2
+
+L.3.19:
+	! remainder is negative
+	addcc	%o3,%o5,%o3
+			! depth 4, accumulated bits 5
+	bl	L.4.21
+	 srl	%o5,1,%o5
+	! remainder is positive
+	subcc	%o3,%o5,%o3
+	b	9f
+	 add	%o2, (5*2+1), %o2
+
+L.4.21:
+	! remainder is negative
+	addcc	%o3,%o5,%o3
+	b	9f
+	 add	%o2, (5*2-1), %o2
+
+L.2.17:
+	! remainder is negative
+	addcc	%o3,%o5,%o3
+			! depth 3, accumulated bits 1
+	bl	L.3.17
+	 srl	%o5,1,%o5
+	! remainder is positive
+	subcc	%o3,%o5,%o3
+			! depth 4, accumulated bits 3
+	bl	L.4.19
+	 srl	%o5,1,%o5
+	! remainder is positive
+	subcc	%o3,%o5,%o3
+	b	9f
+	 add	%o2, (3*2+1), %o2
+
+L.4.19:
+	! remainder is negative
+	addcc	%o3,%o5,%o3
+	b	9f
+	 add	%o2, (3*2-1), %o2
+
+L.3.17:
+	! remainder is negative
+	addcc	%o3,%o5,%o3
+			! depth 4, accumulated bits 1
+	bl	L.4.17
+	 srl	%o5,1,%o5
+	! remainder is positive
+	subcc	%o3,%o5,%o3
+	b	9f
+	 add	%o2, (1*2+1), %o2
+
+L.4.17:
+	! remainder is negative
+	addcc	%o3,%o5,%o3
+	b	9f
+	 add	%o2, (1*2-1), %o2
+
+L.1.16:
+	! remainder is negative
+	addcc	%o3,%o5,%o3
+			! depth 2, accumulated bits -1
+	bl	L.2.15
+	 srl	%o5,1,%o5
+	! remainder is positive
+	subcc	%o3,%o5,%o3
+			! depth 3, accumulated bits -1
+	bl	L.3.15
+	 srl	%o5,1,%o5
+	! remainder is positive
+	subcc	%o3,%o5,%o3
+			! depth 4, accumulated bits -1
+	bl	L.4.15
+	 srl	%o5,1,%o5
+	! remainder is positive
+	subcc	%o3,%o5,%o3
+	b	9f
+	 add	%o2, (-1*2+1), %o2
+
+L.4.15:
+	! remainder is negative
+	addcc	%o3,%o5,%o3
+	b	9f
+	 add	%o2, (-1*2-1), %o2
+
+L.3.15:
+	! remainder is negative
+	addcc	%o3,%o5,%o3
+			! depth 4, accumulated bits -3
+	bl	L.4.13
+	 srl	%o5,1,%o5
+	! remainder is positive
+	subcc	%o3,%o5,%o3
+	b	9f
+	 add	%o2, (-3*2+1), %o2
+
+L.4.13:
+	! remainder is negative
+	addcc	%o3,%o5,%o3
+	b	9f
+	 add	%o2, (-3*2-1), %o2
+
+L.2.15:
+	! remainder is negative
+	addcc	%o3,%o5,%o3
+			! depth 3, accumulated bits -3
+	bl	L.3.13
+	 srl	%o5,1,%o5
+	! remainder is positive
+	subcc	%o3,%o5,%o3
+			! depth 4, accumulated bits -5
+	bl	L.4.11
+	 srl	%o5,1,%o5
+	! remainder is positive
+	subcc	%o3,%o5,%o3
+	b	9f
+	 add	%o2, (-5*2+1), %o2
+
+L.4.11:
+	! remainder is negative
+	addcc	%o3,%o5,%o3
+	b	9f
+	 add	%o2, (-5*2-1), %o2
+
+L.3.13:
+	! remainder is negative
+	addcc	%o3,%o5,%o3
+			! depth 4, accumulated bits -7
+	bl	L.4.9
+	 srl	%o5,1,%o5
+	! remainder is positive
+	subcc	%o3,%o5,%o3
+	b	9f
+	 add	%o2, (-7*2+1), %o2
+
+L.4.9:
+	! remainder is negative
+	addcc	%o3,%o5,%o3
+	b	9f
+	 add	%o2, (-7*2-1), %o2
+
+	9:
+Lend_regular_divide:
+	subcc	%o4, 1, %o4
+	bge	Ldivloop
+	 tst	%o3
+
+	bl,a	Lgot_result
+	! non-restoring fixup here (one instruction only!)
+	sub	%o2, 1, %o2
+
+Lgot_result:
+
+	retl
+	 mov %o2, %o0
+
+	.globl	.udiv_patch
+.udiv_patch:
+	wr	%g0, 0x0, %y
+	nop
+	nop
+	retl
+	 udiv	%o0, %o1, %o0
+	nop
diff --git a/arch/sparc/lib/udivdi3.S b/arch/sparc/lib/udivdi3.S
new file mode 100644
index 0000000..b430f1f
--- /dev/null
+++ b/arch/sparc/lib/udivdi3.S
@@ -0,0 +1,258 @@
+/* Copyright (C) 1989, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING.  If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA.  */
+
+	.text
+	.align 4
+	.globl __udivdi3
+__udivdi3:
+	save %sp,-104,%sp
+	mov %i3,%o3
+	cmp %i2,0
+	bne .LL40
+	mov %i1,%i3
+	cmp %o3,%i0
+	bleu .LL41
+	mov %i3,%o1
+	! Inlined udiv_qrnnd
+	mov	32,%g1
+	subcc	%i0,%o3,%g0
+1:	bcs	5f
+	 addxcc %o1,%o1,%o1	! shift n1n0 and a q-bit in lsb
+	sub	%i0,%o3,%i0	! this kills msb of n
+	addx	%i0,%i0,%i0	! so this cannot give carry
+	subcc	%g1,1,%g1
+2:	bne	1b
+	 subcc	%i0,%o3,%g0
+	bcs	3f
+	 addxcc %o1,%o1,%o1	! shift n1n0 and a q-bit in lsb
+	b	3f
+	 sub	%i0,%o3,%i0	! this kills msb of n
+4:	sub	%i0,%o3,%i0
+5:	addxcc	%i0,%i0,%i0
+	bcc	2b
+	 subcc	%g1,1,%g1
+! Got carry from n.  Subtract next step to cancel this carry.
+	bne	4b
+	 addcc	%o1,%o1,%o1	! shift n1n0 and a 0-bit in lsb
+	sub	%i0,%o3,%i0
+3:	xnor	%o1,0,%o1
+	! End of inline udiv_qrnnd
+	b .LL45
+	mov 0,%o2
+.LL41:
+	cmp %o3,0
+	bne .LL77
+	mov %i0,%o2
+	mov 1,%o0
+	call .udiv,0
+	mov 0,%o1
+	mov %o0,%o3
+	mov %i0,%o2
+.LL77:
+	mov 0,%o4
+	! Inlined udiv_qrnnd
+	mov	32,%g1
+	subcc	%o4,%o3,%g0
+1:	bcs	5f
+	 addxcc %o2,%o2,%o2	! shift n1n0 and a q-bit in lsb
+	sub	%o4,%o3,%o4	! this kills msb of n
+	addx	%o4,%o4,%o4	! so this cannot give carry
+	subcc	%g1,1,%g1
+2:	bne	1b
+	 subcc	%o4,%o3,%g0
+	bcs	3f
+	 addxcc %o2,%o2,%o2	! shift n1n0 and a q-bit in lsb
+	b	3f
+	 sub	%o4,%o3,%o4	! this kills msb of n
+4:	sub	%o4,%o3,%o4
+5:	addxcc	%o4,%o4,%o4
+	bcc	2b
+	 subcc	%g1,1,%g1
+! Got carry from n.  Subtract next step to cancel this carry.
+	bne	4b
+	 addcc	%o2,%o2,%o2	! shift n1n0 and a 0-bit in lsb
+	sub	%o4,%o3,%o4
+3:	xnor	%o2,0,%o2
+	! End of inline udiv_qrnnd
+	mov %o4,%i0
+	mov %i3,%o1
+	! Inlined udiv_qrnnd
+	mov	32,%g1
+	subcc	%i0,%o3,%g0
+1:	bcs	5f
+	 addxcc %o1,%o1,%o1	! shift n1n0 and a q-bit in lsb
+	sub	%i0,%o3,%i0	! this kills msb of n
+	addx	%i0,%i0,%i0	! so this cannot give carry
+	subcc	%g1,1,%g1
+2:	bne	1b
+	 subcc	%i0,%o3,%g0
+	bcs	3f
+	 addxcc %o1,%o1,%o1	! shift n1n0 and a q-bit in lsb
+	b	3f
+	 sub	%i0,%o3,%i0	! this kills msb of n
+4:	sub	%i0,%o3,%i0
+5:	addxcc	%i0,%i0,%i0
+	bcc	2b
+	 subcc	%g1,1,%g1
+! Got carry from n.  Subtract next step to cancel this carry.
+	bne	4b
+	 addcc	%o1,%o1,%o1	! shift n1n0 and a 0-bit in lsb
+	sub	%i0,%o3,%i0
+3:	xnor	%o1,0,%o1
+	! End of inline udiv_qrnnd
+	b .LL78
+	mov %o1,%l1
+.LL40:
+	cmp %i2,%i0
+	bleu .LL46
+	sethi %hi(65535),%o0
+	b .LL73
+	mov 0,%o1
+.LL46:
+	or %o0,%lo(65535),%o0
+	cmp %i2,%o0
+	bgu .LL53
+	mov %i2,%o1
+	cmp %i2,256
+	addx %g0,-1,%o0
+	b .LL59
+	and %o0,8,%o2
+.LL53:
+	sethi %hi(16777215),%o0
+	or %o0,%lo(16777215),%o0
+	cmp %o1,%o0
+	bgu .LL59
+	mov 24,%o2
+	mov 16,%o2
+.LL59:
+	srl %o1,%o2,%o1
+	sethi %hi(__clz_tab),%o0
+	or %o0,%lo(__clz_tab),%o0
+	ldub [%o1+%o0],%o0
+	add %o0,%o2,%o0
+	mov 32,%o1
+	subcc %o1,%o0,%o2
+	bne,a .LL67
+	mov 32,%o0
+	cmp %i0,%i2
+	bgu .LL69
+	cmp %i3,%o3
+	blu .LL73
+	mov 0,%o1
+.LL69:
+	b .LL73
+	mov 1,%o1
+.LL67:
+	sub %o0,%o2,%o0
+	sll %i2,%o2,%i2
+	srl %o3,%o0,%o1
+	or %i2,%o1,%i2
+	sll %o3,%o2,%o3
+	srl %i0,%o0,%o1
+	sll %i0,%o2,%i0
+	srl %i3,%o0,%o0
+	or %i0,%o0,%i0
+	sll %i3,%o2,%i3
+	mov %i0,%o5
+	mov %o1,%o4
+	! Inlined udiv_qrnnd
+	mov	32,%g1
+	subcc	%o4,%i2,%g0
+1:	bcs	5f
+	 addxcc %o5,%o5,%o5	! shift n1n0 and a q-bit in lsb
+	sub	%o4,%i2,%o4	! this kills msb of n
+	addx	%o4,%o4,%o4	! so this cannot give carry
+	subcc	%g1,1,%g1
+2:	bne	1b
+	 subcc	%o4,%i2,%g0
+	bcs	3f
+	 addxcc %o5,%o5,%o5	! shift n1n0 and a q-bit in lsb
+	b	3f
+	 sub	%o4,%i2,%o4	! this kills msb of n
+4:	sub	%o4,%i2,%o4
+5:	addxcc	%o4,%o4,%o4
+	bcc	2b
+	 subcc	%g1,1,%g1
+! Got carry from n.  Subtract next step to cancel this carry.
+	bne	4b
+	 addcc	%o5,%o5,%o5	! shift n1n0 and a 0-bit in lsb
+	sub	%o4,%i2,%o4
+3:	xnor	%o5,0,%o5
+	! End of inline udiv_qrnnd
+	mov %o4,%i0
+	mov %o5,%o1
+	! Inlined umul_ppmm
+	wr	%g0,%o1,%y	! SPARC has 0-3 delay insn after a wr
+	sra	%o3,31,%g2	! Do not move this insn
+	and	%o1,%g2,%g2	! Do not move this insn
+	andcc	%g0,0,%g1	! Do not move this insn
+	mulscc	%g1,%o3,%g1
+	mulscc	%g1,%o3,%g1
+	mulscc	%g1,%o3,%g1
+	mulscc	%g1,%o3,%g1
+	mulscc	%g1,%o3,%g1
+	mulscc	%g1,%o3,%g1
+	mulscc	%g1,%o3,%g1
+	mulscc	%g1,%o3,%g1
+	mulscc	%g1,%o3,%g1
+	mulscc	%g1,%o3,%g1
+	mulscc	%g1,%o3,%g1
+	mulscc	%g1,%o3,%g1
+	mulscc	%g1,%o3,%g1
+	mulscc	%g1,%o3,%g1
+	mulscc	%g1,%o3,%g1
+	mulscc	%g1,%o3,%g1
+	mulscc	%g1,%o3,%g1
+	mulscc	%g1,%o3,%g1
+	mulscc	%g1,%o3,%g1
+	mulscc	%g1,%o3,%g1
+	mulscc	%g1,%o3,%g1
+	mulscc	%g1,%o3,%g1
+	mulscc	%g1,%o3,%g1
+	mulscc	%g1,%o3,%g1
+	mulscc	%g1,%o3,%g1
+	mulscc	%g1,%o3,%g1
+	mulscc	%g1,%o3,%g1
+	mulscc	%g1,%o3,%g1
+	mulscc	%g1,%o3,%g1
+	mulscc	%g1,%o3,%g1
+	mulscc	%g1,%o3,%g1
+	mulscc	%g1,%o3,%g1
+	mulscc	%g1,0,%g1
+	add	%g1,%g2,%o0
+	rd	%y,%o2
+	cmp %o0,%i0
+	bgu,a .LL73
+	add %o1,-1,%o1
+	bne,a .LL45
+	mov 0,%o2
+	cmp %o2,%i3
+	bleu .LL45
+	mov 0,%o2
+	add %o1,-1,%o1
+.LL73:
+	mov 0,%o2
+.LL45:
+	mov %o1,%l1
+.LL78:
+	mov %o2,%l0
+	mov %l0,%i0
+	mov %l1,%i1
+	ret
+	restore
diff --git a/arch/sparc/lib/umul.S b/arch/sparc/lib/umul.S
new file mode 100644
index 0000000..a784720
--- /dev/null
+++ b/arch/sparc/lib/umul.S
@@ -0,0 +1,169 @@
+/* $Id: umul.S,v 1.4 1996/09/30 02:22:39 davem Exp $
+ * umul.S:      This routine was taken from glibc-1.09 and is covered
+ *              by the GNU Library General Public License Version 2.
+ */
+
+
+/*
+ * Unsigned multiply.  Returns %o0 * %o1 in %o1%o0 (i.e., %o1 holds the
+ * upper 32 bits of the 64-bit product).
+ *
+ * This code optimizes short (less than 13-bit) multiplies.  Short
+ * multiplies require 25 instruction cycles, and long ones require
+ * 45 instruction cycles.
+ *
+ * On return, overflow has occurred (%o1 is not zero) if and only if
+ * the Z condition code is clear, allowing, e.g., the following:
+ *
+ *	call	.umul
+ *	nop
+ *	bnz	overflow	(or tnz)
+ */
+
+	.globl .umul
+.umul:
+	or	%o0, %o1, %o4
+	mov	%o0, %y		! multiplier -> Y
+
+	andncc	%o4, 0xfff, %g0	! test bits 12..31 of *both* args
+	be	Lmul_shortway	! if zero, can do it the short way
+	 andcc	%g0, %g0, %o4	! zero the partial product and clear N and V
+
+	/*
+	 * Long multiply.  32 steps, followed by a final shift step.
+	 */
+	mulscc	%o4, %o1, %o4	! 1
+	mulscc	%o4, %o1, %o4	! 2
+	mulscc	%o4, %o1, %o4	! 3
+	mulscc	%o4, %o1, %o4	! 4
+	mulscc	%o4, %o1, %o4	! 5
+	mulscc	%o4, %o1, %o4	! 6
+	mulscc	%o4, %o1, %o4	! 7
+	mulscc	%o4, %o1, %o4	! 8
+	mulscc	%o4, %o1, %o4	! 9
+	mulscc	%o4, %o1, %o4	! 10
+	mulscc	%o4, %o1, %o4	! 11
+	mulscc	%o4, %o1, %o4	! 12
+	mulscc	%o4, %o1, %o4	! 13
+	mulscc	%o4, %o1, %o4	! 14
+	mulscc	%o4, %o1, %o4	! 15
+	mulscc	%o4, %o1, %o4	! 16
+	mulscc	%o4, %o1, %o4	! 17
+	mulscc	%o4, %o1, %o4	! 18
+	mulscc	%o4, %o1, %o4	! 19
+	mulscc	%o4, %o1, %o4	! 20
+	mulscc	%o4, %o1, %o4	! 21
+	mulscc	%o4, %o1, %o4	! 22
+	mulscc	%o4, %o1, %o4	! 23
+	mulscc	%o4, %o1, %o4	! 24
+	mulscc	%o4, %o1, %o4	! 25
+	mulscc	%o4, %o1, %o4	! 26
+	mulscc	%o4, %o1, %o4	! 27
+	mulscc	%o4, %o1, %o4	! 28
+	mulscc	%o4, %o1, %o4	! 29
+	mulscc	%o4, %o1, %o4	! 30
+	mulscc	%o4, %o1, %o4	! 31
+	mulscc	%o4, %o1, %o4	! 32
+	mulscc	%o4, %g0, %o4	! final shift
+
+
+	/*
+	 * Normally, with the shift-and-add approach, if both numbers are
+	 * positive you get the correct result.  With 32-bit two's-complement
+	 * numbers, -x is represented as
+	 *
+	 *		  x		    32
+	 *	( 2  -  ------ ) mod 2  *  2
+	 *		   32
+	 *		  2
+	 *
+	 * (the `mod 2' subtracts 1 from 1.bbbb).  To avoid lots of 2^32s,
+	 * we can treat this as if the radix point were just to the left
+	 * of the sign bit (multiply by 2^32), and get
+	 *
+	 *	-x  =  (2 - x) mod 2
+	 *
+	 * Then, ignoring the `mod 2's for convenience:
+	 *
+	 *   x *  y	= xy
+	 *  -x *  y	= 2y - xy
+	 *   x * -y	= 2x - xy
+	 *  -x * -y	= 4 - 2x - 2y + xy
+	 *
+	 * For signed multiplies, we subtract (x << 32) from the partial
+	 * product to fix this problem for negative multipliers (see mul.s).
+	 * Because of the way the shift into the partial product is calculated
+	 * (N xor V), this term is automatically removed for the multiplicand,
+	 * so we don't have to adjust.
+	 *
+	 * But for unsigned multiplies, the high order bit wasn't a sign bit,
+	 * and the correction is wrong.  So for unsigned multiplies where the
+	 * high order bit is one, we end up with xy - (y << 32).  To fix it
+	 * we add y << 32.
+	 */
+#if 0
+	tst	%o1
+	bl,a	1f		! if %o1 < 0 (high order bit = 1),
+	 add	%o4, %o0, %o4	! %o4 += %o0 (add y to upper half)
+
+1:
+	rd	%y, %o0		! get lower half of product
+	retl
+	 addcc	%o4, %g0, %o1	! put upper half in place and set Z for %o1==0
+#else
+	/* Faster code from tege@sics.se.  */
+	sra	%o1, 31, %o2	! make mask from sign bit
+	and	%o0, %o2, %o2	! %o2 = 0 or %o0, depending on sign of %o1
+	rd	%y, %o0		! get lower half of product
+	retl
+	 addcc	%o4, %o2, %o1	! add compensation and put upper half in place
+#endif
+
+Lmul_shortway:
+	/*
+	 * Short multiply.  12 steps, followed by a final shift step.
+	 * The resulting bits are off by 12 and (32-12) = 20 bit positions,
+	 * but there is no problem with %o0 being negative (unlike above),
+	 * and overflow is impossible (the answer is at most 24 bits long).
+	 */
+	mulscc	%o4, %o1, %o4	! 1
+	mulscc	%o4, %o1, %o4	! 2
+	mulscc	%o4, %o1, %o4	! 3
+	mulscc	%o4, %o1, %o4	! 4
+	mulscc	%o4, %o1, %o4	! 5
+	mulscc	%o4, %o1, %o4	! 6
+	mulscc	%o4, %o1, %o4	! 7
+	mulscc	%o4, %o1, %o4	! 8
+	mulscc	%o4, %o1, %o4	! 9
+	mulscc	%o4, %o1, %o4	! 10
+	mulscc	%o4, %o1, %o4	! 11
+	mulscc	%o4, %o1, %o4	! 12
+	mulscc	%o4, %g0, %o4	! final shift
+
+	/*
+	 * %o4 has 20 of the bits that should be in the result; %y has
+	 * the bottom 12 (as %y's top 12).  That is:
+	 *
+	 *	  %o4		    %y
+	 * +----------------+----------------+
+	 * | -12- |   -20-  | -12- |   -20-  |
+	 * +------(---------+------)---------+
+	 *	   -----result-----
+	 *
+	 * The 12 bits of %o4 left of the `result' area are all zero;
+	 * in fact, all top 20 bits of %o4 are zero.
+	 */
+
+	rd	%y, %o5
+	sll	%o4, 12, %o0	! shift middle bits left 12
+	srl	%o5, 20, %o5	! shift low bits right 20
+	or	%o5, %o0, %o0
+	retl
+	 addcc	%g0, %g0, %o1	! %o1 = zero, and set Z
+
+	.globl	.umul_patch
+.umul_patch:
+	umul	%o0, %o1, %o0
+	retl
+	 rd	%y, %o1
+	nop
diff --git a/arch/sparc/lib/urem.S b/arch/sparc/lib/urem.S
new file mode 100644
index 0000000..ec7f0c5
--- /dev/null
+++ b/arch/sparc/lib/urem.S
@@ -0,0 +1,355 @@
+/* $Id: urem.S,v 1.4 1996/09/30 02:22:42 davem Exp $
+ * urem.S:      This routine was taken from glibc-1.09 and is covered
+ *              by the GNU Library General Public License Version 2.
+ */
+
+/* This file is generated from divrem.m4; DO NOT EDIT! */
+/*
+ * Division and remainder, from Appendix E of the Sparc Version 8
+ * Architecture Manual, with fixes from Gordon Irlam.
+ */
+
+/*
+ * Input: dividend and divisor in %o0 and %o1 respectively.
+ *
+ * m4 parameters:
+ *  .urem	name of function to generate
+ *  rem		rem=div => %o0 / %o1; rem=rem => %o0 % %o1
+ *  false		false=true => signed; false=false => unsigned
+ *
+ * Algorithm parameters:
+ *  N		how many bits per iteration we try to get (4)
+ *  WORDSIZE	total number of bits (32)
+ *
+ * Derived constants:
+ *  TOPBITS	number of bits in the top decade of a number
+ *
+ * Important variables:
+ *  Q		the partial quotient under development (initially 0)
+ *  R		the remainder so far, initially the dividend
+ *  ITER	number of main division loop iterations required;
+ *		equal to ceil(log2(quotient) / N).  Note that this
+ *		is the log base (2^N) of the quotient.
+ *  V		the current comparand, initially divisor*2^(ITER*N-1)
+ *
+ * Cost:
+ *  Current estimate for non-large dividend is
+ *	ceil(log2(quotient) / N) * (10 + 7N/2) + C
+ *  A large dividend is one greater than 2^(31-TOPBITS) and takes a
+ *  different path, as the upper bits of the quotient must be developed
+ *  one bit at a time.
+ */
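Plugging N=4 into the estimate above, a full 32-bit quotient takes ceil(32/4) = 8 main-loop iterations of roughly 10 + 7*4/2 = 24 instructions, i.e. about 192 plus the constant C.  The sketch below is a minimal C model of the structure just described, with names of my own choosing; it restores instead of using the non-restoring steps of the real code, and it keeps V in 64 bits so the large-dividend special case disappears:

#include <stdint.h>

/*
 * Minimal C model of .urem: scale the comparand V up by N=4 bits at a
 * time, then peel off four quotient bits per main-loop iteration.  A
 * sketch only; the real code is non-restoring and fully unrolled.
 */
static uint32_t urem_model(uint32_t x, uint32_t y)
{
	uint32_t r = x;		/* R: remainder so far */
	uint64_t v = y;		/* V: comparand, scaled up */
	int iter = 0;		/* ITER: main-loop count */
	int bit;

	if (y == 0)
		return 0;	/* the real code traps with ST_DIV0 first */
	if (y > x)
		return x;	/* the cmp/blu Lgot_result early-out */

	while (v <= r) {	/* scale V until it exceeds R */
		v <<= 4;
		iter++;
	}
	while (iter-- > 0) {	/* develop 4 quotient bits per iteration */
		for (bit = 0; bit < 4; bit++) {
			v >>= 1;
			if (r >= v)
				r -= v;
		}
	}
	return r;
}

The real routine develops those four bits with the unrolled non-restoring branch tree at Ldivloop below rather than with this inner loop.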
+
+	.globl .urem
+.urem:
+
+	! Ready to divide.  Compute size of quotient; scale comparand.
+	orcc	%o1, %g0, %o5
+	bne	1f
+	 mov	%o0, %o3
+
+		! Divide by zero trap.  If it returns, return 0 (about as
+		! wrong as possible, but that is what SunOS does...).
+		ta	ST_DIV0
+		retl
+		 clr	%o0
+
+1:
+	cmp	%o3, %o5			! if %o1 exceeds %o0, done
+	blu	Lgot_result		! (and algorithm fails otherwise)
+	 clr	%o2
+
+	sethi	%hi(1 << (32 - 4 - 1)), %g1
+
+	cmp	%o3, %g1
+	blu	Lnot_really_big
+	 clr	%o4
+
+	! Here the dividend is >= 2**(31-N) or so.  We must be careful here,
+	! as our usual N-at-a-shot divide step will cause overflow and havoc.
+	! The number of bits in the result here is N*ITER+SC, where SC <= N.
+	! Compute ITER in an unorthodox manner: we know we need to shift V
+	! into the top decade, so we do not even bother to compare it to R.
+	1:
+		cmp	%o5, %g1
+		bgeu	3f
+		 mov	1, %g7
+
+		sll	%o5, 4, %o5
+
+		b	1b
+		 add	%o4, 1, %o4
+
+	! Now compute %g7.
+	2:
+		addcc	%o5, %o5, %o5
+		bcc	Lnot_too_big
+		 add	%g7, 1, %g7
+
+		! We get here if %o5 (the shifted copy of %o1) overflowed
+		! while shifting.
+		! This means that %o3 has the high-order bit set.
+		! Restore %o5 and subtract from %o3.
+		sll	%g1, 4, %g1	! high order bit
+		srl	%o5, 1, %o5		! rest of %o5
+		add	%o5, %g1, %o5
+
+		b	Ldo_single_div
+		 sub	%g7, 1, %g7
+
+	Lnot_too_big:
+	3:
+		cmp	%o5, %o3
+		blu	2b
+		 nop
+
+		be	Ldo_single_div
+		 nop
+	/* NB: these are commented out in the V8-Sparc manual as well */
+	/* (I do not understand this) */
+	! %o5 > %o3: went too far: back up 1 step
+	!	srl	%o5, 1, %o5
+	!	dec	%g7
+	! do single-bit divide steps
+	!
+	! We have to be careful here.  We know that %o3 >= %o5, so we can do the
+	! first divide step without thinking.  BUT, the others are conditional,
+	! and are only done if %o3 >= 0.  Because both %o3 and %o5 may have the high-
+	! order bit set in the first step, just falling into the regular
+	! division loop will mess up the first time around.
+	! So we unroll slightly...
+	Ldo_single_div:
+		subcc	%g7, 1, %g7
+		bl	Lend_regular_divide
+		 nop
+
+		sub	%o3, %o5, %o3
+		mov	1, %o2
+
+		b	Lend_single_divloop
+		 nop
+	Lsingle_divloop:
+		sll	%o2, 1, %o2
+		bl	1f
+		 srl	%o5, 1, %o5
+		! %o3 >= 0
+		sub	%o3, %o5, %o3
+		b	2f
+		 add	%o2, 1, %o2
+	1:	! %o3 < 0
+		add	%o3, %o5, %o3
+		sub	%o2, 1, %o2
+	2:
+	Lend_single_divloop:
+		subcc	%g7, 1, %g7
+		bge	Lsingle_divloop
+		 tst	%o3
+
+		b,a	Lend_regular_divide
+
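The recurrence behind the single-bit loop above is easier to see in C.  The helper below is hypothetical and not a line-by-line transcription: it uses 64-bit signed arithmetic, so the high-order-bit overflow the comment warns about cannot occur, and it assumes the quotient fits in g7+1 bits:

#include <stdint.h>

/*
 * Non-restoring single-bit division, in the spirit of Ldo_single_div /
 * Lsingle_divloop.  Assumes 0 <= x < (y << (g7 + 1)) and that y << g7
 * fits in 63 bits.  Each step halves V and either subtracts it
 * (R >= 0, quotient digit +1) or adds it back (R < 0, digit -1); a
 * final add of the divisor makes the remainder non-negative, as the
 * fixup at Lend_regular_divide does.
 */
static uint32_t single_bit_rem(uint32_t x, uint32_t y, int g7)
{
	int64_t r = (int64_t)x - ((int64_t)y << g7);	/* unconditional first step */
	int i;

	for (i = g7 - 1; i >= 0; i--) {
		if (r >= 0)
			r -= (int64_t)y << i;	/* sub %o3, %o5, %o3 */
		else
			r += (int64_t)y << i;	/* add %o3, %o5, %o3 */
	}
	if (r < 0)
		r += y;				/* non-restoring fixup */
	return (uint32_t)r;
}

The quotient bookkeeping done on %o2 above is dropped here, since .urem only needs the remainder.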
+Lnot_really_big:
+1:
+	sll	%o5, 4, %o5
+
+	cmp	%o5, %o3
+	bleu	1b
+	 addcc	%o4, 1, %o4
+
+	be	Lgot_result
+	 sub	%o4, 1, %o4
+
+	tst	%o3	! set up for initial iteration
+Ldivloop:
+	sll	%o2, 4, %o2
+		! depth 1, accumulated bits 0
+	bl	L.1.16
+	 srl	%o5,1,%o5
+	! remainder is positive
+	subcc	%o3,%o5,%o3
+			! depth 2, accumulated bits 1
+	bl	L.2.17
+	 srl	%o5,1,%o5
+	! remainder is positive
+	subcc	%o3,%o5,%o3
+			! depth 3, accumulated bits 3
+	bl	L.3.19
+	 srl	%o5,1,%o5
+	! remainder is positive
+	subcc	%o3,%o5,%o3
+			! depth 4, accumulated bits 7
+	bl	L.4.23
+	 srl	%o5,1,%o5
+	! remainder is positive
+	subcc	%o3,%o5,%o3
+	b	9f
+	 add	%o2, (7*2+1), %o2
+
+L.4.23:
+	! remainder is negative
+	addcc	%o3,%o5,%o3
+	b	9f
+	 add	%o2, (7*2-1), %o2
+
+L.3.19:
+	! remainder is negative
+	addcc	%o3,%o5,%o3
+			! depth 4, accumulated bits 5
+	bl	L.4.21
+	 srl	%o5,1,%o5
+	! remainder is positive
+	subcc	%o3,%o5,%o3
+	b	9f
+	 add	%o2, (5*2+1), %o2
+
+L.4.21:
+	! remainder is negative
+	addcc	%o3,%o5,%o3
+	b	9f
+	 add	%o2, (5*2-1), %o2
+
+L.2.17:
+	! remainder is negative
+	addcc	%o3,%o5,%o3
+			! depth 3, accumulated bits 1
+	bl	L.3.17
+	 srl	%o5,1,%o5
+	! remainder is positive
+	subcc	%o3,%o5,%o3
+			! depth 4, accumulated bits 3
+	bl	L.4.19
+	 srl	%o5,1,%o5
+	! remainder is positive
+	subcc	%o3,%o5,%o3
+	b	9f
+	 add	%o2, (3*2+1), %o2
+
+L.4.19:
+	! remainder is negative
+	addcc	%o3,%o5,%o3
+	b	9f
+	 add	%o2, (3*2-1), %o2
+
+L.3.17:
+	! remainder is negative
+	addcc	%o3,%o5,%o3
+			! depth 4, accumulated bits 1
+	bl	L.4.17
+	 srl	%o5,1,%o5
+	! remainder is positive
+	subcc	%o3,%o5,%o3
+	b	9f
+	 add	%o2, (1*2+1), %o2
+	
+L.4.17:
+	! remainder is negative
+	addcc	%o3,%o5,%o3
+	b	9f
+	 add	%o2, (1*2-1), %o2
+
+L.1.16:
+	! remainder is negative
+	addcc	%o3,%o5,%o3
+			! depth 2, accumulated bits -1
+	bl	L.2.15
+	 srl	%o5,1,%o5
+	! remainder is positive
+	subcc	%o3,%o5,%o3
+			! depth 3, accumulated bits -1
+	bl	L.3.15
+	 srl	%o5,1,%o5
+	! remainder is positive
+	subcc	%o3,%o5,%o3
+			! depth 4, accumulated bits -1
+	bl	L.4.15
+	 srl	%o5,1,%o5
+	! remainder is positive
+	subcc	%o3,%o5,%o3
+	b	9f
+	 add	%o2, (-1*2+1), %o2
+
+L.4.15:
+	! remainder is negative
+	addcc	%o3,%o5,%o3
+	b	9f
+	 add	%o2, (-1*2-1), %o2
+
+L.3.15:
+	! remainder is negative
+	addcc	%o3,%o5,%o3
+			! depth 4, accumulated bits -3
+	bl	L.4.13
+	 srl	%o5,1,%o5
+	! remainder is positive
+	subcc	%o3,%o5,%o3
+	b	9f
+	 add	%o2, (-3*2+1), %o2
+
+L.4.13:
+	! remainder is negative
+	addcc	%o3,%o5,%o3
+	b	9f
+	 add	%o2, (-3*2-1), %o2
+
+L.2.15:
+	! remainder is negative
+	addcc	%o3,%o5,%o3
+			! depth 3, accumulated bits -3
+	bl	L.3.13
+	 srl	%o5,1,%o5
+	! remainder is positive
+	subcc	%o3,%o5,%o3
+			! depth 4, accumulated bits -5
+	bl	L.4.11
+	 srl	%o5,1,%o5
+	! remainder is positive
+	subcc	%o3,%o5,%o3
+	b	9f
+	 add	%o2, (-5*2+1), %o2
+	
+L.4.11:
+	! remainder is negative
+	addcc	%o3,%o5,%o3
+	b	9f
+	 add	%o2, (-5*2-1), %o2
+
+L.3.13:
+	! remainder is negative
+	addcc	%o3,%o5,%o3
+			! depth 4, accumulated bits -7
+	bl	L.4.9
+	 srl	%o5,1,%o5
+	! remainder is positive
+	subcc	%o3,%o5,%o3
+	b	9f
+	 add	%o2, (-7*2+1), %o2
+
+L.4.9:
+	! remainder is negative
+	addcc	%o3,%o5,%o3
+	b	9f
+	 add	%o2, (-7*2-1), %o2
+
+	9:
+Lend_regular_divide:
+	subcc	%o4, 1, %o4
+	bge	Ldivloop
+	 tst	%o3
+
+	bl,a	Lgot_result
+	! non-restoring fixup here (one instruction only!)
+	add	%o3, %o1, %o3
+
+Lgot_result:
+
+	retl
+	 mov %o3, %o0
+
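For reference, each pass through Ldivloop above is equivalent to four single-bit non-restoring steps whose +1/-1 decisions are folded into one odd radix-16 digit between -15 and 15; that is all the "accumulated bits" bookkeeping and the add %o2, (n*2+1) / (n*2-1) leaves amount to.  A hypothetical C rendering of one pass (names mine, 64-bit values used for simplicity):

#include <stdint.h>

/* One Ldivloop pass; r, v and q play the roles of %o3, %o5 and %o2. */
static void divloop_pass(int64_t *r, int64_t *v, int32_t *q)
{
	int32_t digit = 0;
	int i;

	*q <<= 4;			/* sll %o2, 4, %o2 */
	for (i = 0; i < 4; i++) {
		*v >>= 1;		/* srl %o5, 1, %o5 (delay slot) */
		if (*r >= 0) {
			*r -= *v;	/* subcc %o3, %o5, %o3 */
			digit = 2 * digit + 1;
		} else {
			*r += *v;	/* addcc %o3, %o5, %o3 */
			digit = 2 * digit - 1;
		}
	}
	*q += digit;			/* the (n*2+1) / (n*2-1) adds */
}

After the last pass the remainder can still be negative, but by less than one divisor, which is why the bl,a at Lend_regular_divide executes the add %o3, %o1, %o3 fixup exactly once in that case before reaching Lgot_result.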
+	.globl	.urem_patch
+.urem_patch:
+	wr	%g0, 0x0, %y
+	nop
+	nop
+	nop
+	udiv	%o0, %o1, %o2
+	umul	%o2, %o1, %o2
+	retl
+	 sub	%o0, %o2, %o0
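On V8 CPUs the .urem_patch sequence above replaces the whole loop with the hardware closed form; in C terms (a hypothetical helper, assuming y is non-zero, with the wr %g0, 0x0, %y clearing %y as udiv expects):

#include <stdint.h>

/* What the patched udiv/umul/sub sequence computes. */
static uint32_t urem_v8(uint32_t x, uint32_t y)
{
	uint32_t q = x / y;	/* udiv %o0, %o1, %o2 */

	return x - q * y;	/* umul %o2, %o1, %o2; sub %o0, %o2, %o0 */
}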